Bug Summary

File: programs/pluto/kernel.c
Warning: line 2307, column 4
Value stored to 'outgoing_ref_set' is never read
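
The warning is a dead-store diagnostic (the deadcode checker is enabled in the analyzer invocation below): a value is assigned to 'outgoing_ref_set' at kernel.c:2307, but the variable is never read again, so the store has no effect. Line 2307 is not part of the excerpt that follows, so the snippet below is only a minimal, hypothetical sketch of the reported pattern, not the actual code:

#include <stdbool.h>

/* Hypothetical illustration of the reported pattern (not kernel.c:2307):
 * the value written to 'outgoing_ref_set' is never read afterwards. */
static bool dead_store_sketch(bool flag)
{
        bool outgoing_ref_set;

        outgoing_ref_set = flag;        /* "Value stored to 'outgoing_ref_set' is never read" */
        return flag;                    /* the stored value is never used before returning */
}

The usual remedies are to use the stored value, or to delete the redundant assignment (and the variable, if nothing else needs it).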

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-redhat-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name kernel.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/build/quick-libreswan-2/programs/pluto -resource-dir /usr/lib64/clang/13.0.0 -D TimeZoneOffset=timezone -D PIE -D NSS_IPSEC_PROFILE -D XFRM_LIFETIME_DEFAULT=30 -D USE_IKEv1 -D XFRM_SUPPORT -D USE_XFRM_INTERFACE -D USE_DNSSEC -D DEFAULT_DNSSEC_ROOTKEY_FILE="/var/lib/unbound/root.key" -D HAVE_LABELED_IPSEC -D HAVE_SECCOMP -D LIBCURL -D USE_LINUX_AUDIT -D HAVE_NM -D USE_PAM_AUTH -D USE_3DES -D USE_AES -D USE_CAMELLIA -D USE_CHACHA -D USE_DH31 -D USE_MD5 -D USE_SHA1 -D USE_SHA2 -D USE_PRF_AES_XCBC -D USE_NSS_KDF -D DEFAULT_RUNDIR="/run/pluto" -D IPSEC_CONF="/etc/ipsec.conf" -D IPSEC_CONFDDIR="/etc/ipsec.d" -D IPSEC_NSSDIR="/var/lib/ipsec/nss" -D IPSEC_CONFDIR="/etc" -D IPSEC_EXECDIR="/usr/local/libexec/ipsec" -D IPSEC_SBINDIR="/usr/local/sbin" -D IPSEC_VARDIR="/var" -D POLICYGROUPSDIR="/etc/ipsec.d/policies" -D IPSEC_SECRETS_FILE="/etc/ipsec.secrets" -D FORCE_PR_ASSERT -D USE_FORK=1 -D USE_VFORK=0 -D USE_DAEMON=0 -D USE_PTHREAD_SETSCHEDPRIO=1 -D GCC_LINT -D HAVE_LIBCAP_NG -I . -I ../../OBJ.linux.x86_64/programs/pluto -I ../../include -I /usr/include/nss3 -I /usr/include/nspr4 -I /home/build/quick-libreswan-2/programs/pluto/linux-copy -D HERE_FILENAME="programs/pluto/kernel.c" -internal-isystem /usr/lib64/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib/gcc/x86_64-redhat-linux/11/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -std=gnu99 -fdebug-compilation-dir=/home/build/quick-libreswan-2/programs/pluto -ferror-limit 19 -stack-protector 3 -fgnuc-version=4.2.1 -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-01-205714-1273399-1 -x c /home/build/quick-libreswan-2/programs/pluto/kernel.c
1/* routines that interface with the kernel's IPsec mechanism, for libreswan
2 *
3 * Copyright (C) 1997 Angelos D. Keromytis.
4 * Copyright (C) 1998-2010 D. Hugh Redelmeier.
5 * Copyright (C) 2003-2008 Michael Richardson <mcr@xelerance.com>
6 * Copyright (C) 2007-2010 Paul Wouters <paul@xelerance.com>
7 * Copyright (C) 2008-2010 David McCullough <david_mccullough@securecomputing.com>
8 * Copyright (C) 2010 Bart Trojanowski <bart@jukie.net>
9 * Copyright (C) 2009-2010 Tuomo Soini <tis@foobar.fi>
10 * Copyright (C) 2010 Avesh Agarwal <avagarwa@redhat.com>
11 * Copyright (C) 2010-2019 D. Hugh Redelmeier <hugh@mimosa.com>
12 * Copyright (C) 2012-2015 Paul Wouters <paul@libreswan.org>
13 * Copyright (C) 2013 Kim B. Heino <b@bbbs.net>
14 * Copyright (C) 2016-2019 Andrew Cagney <cagney@gnu.org>
15 * Copyright (C) 2019 Paul Wouters <pwouters@redhat.com>
16 * Copyright (C) 2017 Mayank Totale <mtotale@gmail.com>
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms of the GNU General Public License as published by the
20 * Free Software Foundation; either version 2 of the License, or (at your
21 * option) any later version. See <https://www.gnu.org/licenses/gpl2.txt>.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
25 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
26 * for more details.
27 */
28
29#include <stddef.h>
30#include <string.h>
31#include <stdio.h>
32#include <stdlib.h>
33#include <errno.h>
34#include <sys/wait.h> /* for WIFEXITED() et.al. */
35#include <unistd.h>
36#include <fcntl.h>
37#include <sys/utsname.h>
38#include <sys/ioctl.h>
39
40#include <sys/stat.h>
41#include <sys/socket.h>
42#include <netinet/in.h>
43#include <arpa/inet.h>
44
45#include <event2/event.h>
46#include <event2/event_struct.h>
47#include <event2/thread.h>
48
49
50#include "sysdep.h"
51#include "constants.h"
52
53#include "defs.h"
54#include "rnd.h"
55#include "id.h"
56#include "connections.h" /* needs id.h */
57#include "state.h"
58#include "timer.h"
59#include "kernel.h"
60#include "kernel_ops.h"
61#include "kernel_xfrm.h"
62#include "packet.h"
63#include "x509.h"
64#include "pluto_x509.h"
65#include "certs.h"
66#include "secrets.h"
67#include "log.h"
68#include "server.h"
69#include "whack.h" /* for RC_LOG_SERIOUS */
70#include "keys.h"
71#include "ike_alg.h"
72#include "ike_alg_encrypt.h"
73#include "ike_alg_integ.h"
74
75#include "packet.h" /* for pb_stream in nat_traversal.h */
76#include "nat_traversal.h"
77#include "ip_address.h"
78#include "ip_info.h"
79#include "lswfips.h" /* for libreswan_fipsmode() */
80# include "kernel_xfrm_interface.h"
81#include "iface.h"
82#include "ip_selector.h"
83#include "ip_encap.h"
84#include "show.h"
85
86static bool route_and_eroute(struct connection *c,
87 struct spd_route *sr,
88 struct state *st,
89 /* st or c */
90 struct logger *logger);
91
92static bool eroute_connection(enum kernel_policy_op op, const char *opname,
93 const struct spd_route *sr,
94 ipsec_spi_t cur_spi,
95 ipsec_spi_t new_spi,
96 const struct kernel_route *route,
97 enum eroute_type esatype,
98 const struct kernel_encap *encap,
99 uint32_t sa_priority,
100 const struct sa_marks *sa_marks,
101 const uint32_t xfrm_if_id,
102 shunk_t sec_label,
103 struct logger *logger);
104
105bool can_do_IPcomp = true; /* can system actually perform IPCOMP? */
106
107static global_timer_cb kernel_scan_shunts;
108static bool invoke_command(const char *verb, const char *verb_suffix,
109 const char *cmd, struct logger *logger);
110
111/* test if the routes required for two different connections agree
112 * It is assumed that the destination subnets agree; we are only
113 * testing that the interfaces and nexthops match.
114 */
115#define routes_agree(c, d) \
116 ((c)->interface->ip_dev == (d)->interface->ip_dev && \
117 sameaddr(&(c)->spd.this.host_nexthop, &(d)->spd.this.host_nexthop))
118
119const struct kernel_encap esp_transport_kernel_encap = {
120 .outer = 0,
121 .inner_proto = &ip_protocol_esp,
122 .mode = ENCAP_MODE_TRANSPORT,
123 .rule[0] = {
124 .proto = ENCAP_PROTO_ESP,
125 .reqid = 0
126 },
127};
128
129struct bare_shunt {
130 policy_prio_t policy_prio;
131 ip_selector our_client;
132 ip_selector peer_client;
133 ip_said said;
134 int transport_proto; /* XXX: same value in local/remote */
135 unsigned long count;
136 monotime_t last_activity;
137
138 /*
139 * Note: "why" must be in stable storage (not auto, not heap)
140 * because we use it indefinitely without copying or pfreeing.
141 * Simple rule: use a string literal.
142 */
143 const char *why;
144
145 /* the connection from where it came - used to re-load /32 conns */
146 co_serial_t from_serialno;
147
148 struct bare_shunt *next;
149};
150
151static struct bare_shunt *bare_shunts = NULL;
152
153#ifdef IPSEC_CONNECTION_LIMIT
154static int num_ipsec_eroute = 0;
155#endif
156
157static void jam_bare_shunt(struct jambuf *buf, const struct bare_shunt *bs)
158{
159 jam(buf, "bare shunt %p ", bs);
160 jam_selector(buf, &bs->our_client);
161 jam(buf, " --%d--> ", bs->transport_proto);
162 jam_selector(buf, &bs->peer_client);
163 jam(buf, " => ");
164 jam_said(buf, &bs->said);
165 jam(buf, " ");
166 jam_policy_prio(buf, bs->policy_prio);
167 jam(buf, " %s", bs->why);
168}
169
170static void llog_bare_shunt(lset_t rc_flags, struct logger *logger,
171 const struct bare_shunt *bs, const char *op)
172{
173 LLOG_JAMBUF(rc_flags, logger, buf) {
174 jam(buf, "%s ", op);
175 jam_bare_shunt(buf, bs);
176 }
177}
178
179static void dbg_bare_shunt(const char *op, const struct bare_shunt *bs)
180{
181 LSWDBGP(DBG_BASE, buf) {
182 jam(buf, "%s ", op);
183 jam_bare_shunt(buf, bs);
184 }
185}
186
187/*
188 * Note: "why" must be in stable storage (not auto, not heap)
189 * because we use it indefinitely without copying or pfreeing.
190 * Simple rule: use a string literal.
191 */
192void add_bare_shunt(const ip_selector *our_client,
193 const ip_selector *peer_client,
194 int transport_proto, ipsec_spi_t shunt_spi,
195 const char *why, struct logger *logger)
196{
197 /* report any duplication; this should NOT happen */
198 struct bare_shunt **bspp = bare_shunt_ptr(our_client, peer_client, transport_proto, why);
199
200 if (bspp != NULL) {
201 /* maybe: passert(bsp == NULL); */
202 llog_bare_shunt(RC_LOG, logger, *bspp,
203 "CONFLICTING existing");
204 }
205
206 struct bare_shunt *bs = alloc_thing(struct bare_shunt, "bare shunt");
207
208 bs->why = why;
209 bs->our_client = *our_client;
210 bs->peer_client = *peer_client;
211 bs->transport_proto = transport_proto;
212 bs->policy_prio = BOTTOM_PRIO;
213
214 bs->said = said_from_address_protocol_spi(selector_type(our_client)->address.any,
215 &ip_protocol_internal,
216 htonl(shunt_spi));
217 bs->count = 0;
218 bs->last_activity = mononow();
219
220 bs->next = bare_shunts;
221 bare_shunts = bs;
222 dbg_bare_shunt("add", bs);
223
224 /* report duplication; this should NOT happen */
225 if (bspp != NULL) {
226 llog_bare_shunt(RC_LOG, logger, bs,
227 "CONFLICTING new");
228 }
229}
230
231static reqid_t get_proto_reqid(reqid_t base, const struct ip_protocol *proto)
232{
233 if (proto == &ip_protocol_comp)
234 return reqid_ipcomp(base);
235
236 if (proto == &ip_protocol_esp)
237 return reqid_esp(base);
238
239 if (proto == &ip_protocol_ah)
240 return reqid_ah(base);
241
242 PASSERT_FAIL("bad protocol %s", proto->name);
243}
244
245static const char *said_str(const ip_address dst,
246 const struct ip_protocol *sa_proto,
247 ipsec_spi_t spi,
248 said_buf *buf)
249{
250 ip_said said = said_from_address_protocol_spi(dst, sa_proto, spi);
251 return str_said(&said, buf);
252}
253
254/* Generate Unique SPI numbers.
255 *
256 * The specs say that the number must not be less than IPSEC_DOI_SPI_MIN.
257 * Pluto generates numbers not less than IPSEC_DOI_SPI_OUR_MIN,
258 * reserving numbers in between for manual keying (but we cannot so
259 * restrict numbers generated by our peer).
260 * XXX This should be replaced by a call to the kernel when
261 * XXX we get an API.
262 * The returned SPI is in network byte order.
263 * We use a random number as the initial SPI so that there is
264 * a good chance that different Pluto instances will choose
265 * different SPIs. This is good for two reasons.
266 * - the keying material for the initiator and responder only
267 * differs if the SPIs differ.
268 * - if Pluto is restarted, it would otherwise recycle the SPI
269 * numbers and confuse everything. When the kernel generates
270 * SPIs, this will no longer matter.
271 * We then allocate numbers sequentially. Thus we don't have to
272 * check if the number was previously used (assuming that no
273 * SPI lives longer than 4G of its successors).
274 */
275ipsec_spi_t get_ipsec_spi(ipsec_spi_t avoid,
276 const struct ip_protocol *proto,
277 const struct spd_route *sr,
278 bool tunnel,
279 struct logger *logger)
280{
281 passert(proto == &ip_protocol_ah || proto == &ip_protocol_esp);
282
283 ipsec_spi_t network_spi;
284 if (kernel_ops->get_spi != NULL) {
285 said_buf sb;
286 network_spi = kernel_ops->get_spi(&sr->that.host_addr,
287 &sr->this.host_addr, proto, tunnel,
288 get_proto_reqid(sr->reqid, proto),
289 IPSEC_DOI_SPI_OUR_MIN, 0xffffffffU,
290 said_str(sr->this.host_addr, proto, 0, &sb),
291 logger);
292 } else {
293 static ipsec_spi_t host_spi; /* host order, so not returned directly! */
294 do {
295 get_rnd_bytes(&host_spi, sizeof(host_spi));
296 network_spi = htonl(host_spi);
297 } while (host_spi < IPSEC_DOI_SPI_OUR_MIN || network_spi == avoid);
298 }
299
300 said_buf sb;
301 address_buf rb;
302 dbg("kernel: allocated incoming spi %s -> %s%s",
303 str_address(&sr->that.host_addr, &rb),
304 said_str(sr->this.host_addr, proto, network_spi, &sb),
305 tunnel ? " in tunnel-mode" : "");
306 return network_spi;
307}
308
309/* Generate Unique CPI numbers.
310 * The result is returned as an SPI (4 bytes) in network order!
311 * The real bits are in the nework-low-order 2 bytes.
312 * Modelled on get_ipsec_spi, but range is more limited:
313 * 256-61439.
314 * If we can't find one easily, return 0 (a bad SPI,
315 * no matter what order) indicating failure.
316 */
317ipsec_spi_t get_my_cpi(const struct spd_route *sr, bool tunnel,
318 struct logger *logger)
319{
320 if (kernel_ops->get_spi != NULL) {
321 said_buf sb;
322 return kernel_ops->get_spi(&sr->that.host_addr,
323 &sr->this.host_addr, &ip_protocol_comp,
324 tunnel,
325 get_proto_reqid(sr->reqid, &ip_protocol_comp),
326 IPCOMP_FIRST_NEGOTIATED,
327 IPCOMP_LAST_NEGOTIATED,
328 said_str(sr->this.host_addr, &ip_protocol_comp, 0, &sb),
329 logger);
330 } else {
331 static cpi_t first_busy_cpi = 0;
332 static cpi_t latest_cpi = 0;
333
334 while (!(IPCOMP_FIRST_NEGOTIATED <= first_busy_cpi &&
335 first_busy_cpi < IPCOMP_LAST_NEGOTIATED)) {
336 get_rnd_bytes((uint8_t *)&first_busy_cpi,
337 sizeof(first_busy_cpi));
338 latest_cpi = first_busy_cpi;
339 }
340
341 latest_cpi++;
342
343 if (latest_cpi == first_busy_cpi)
344 find_my_cpi_gap(&latest_cpi, &first_busy_cpi);
345
346 if (latest_cpi > IPCOMP_LAST_NEGOTIATED)
347 latest_cpi = IPCOMP_FIRST_NEGOTIATED;
348
349 return htonl((ipsec_spi_t)latest_cpi);
350 }
351}
352
353/*
354 * Remove all characters but [-_.0-9a-zA-Z] from a character string.
355 * Truncates the result if it would be too long.
356 */
357
358static void jam_clean_xauth_username(struct jambuf *buf,
359 const char *src,
360 struct logger *logger)
361{
362 bool changed = false;
363 const char *dst = jambuf_cursor(buf);
364 while (*src != '\0') {
365 if ((*src >= '0' && *src <= '9') ||
366 (*src >= 'a' && *src <= 'z') ||
367 (*src >= 'A' && *src <= 'Z') ||
368 *src == '_' || *src == '-' || *src == '.') {
369 jam_char(buf, *src);
370 } else {
371 changed = true;
372 }
373 src++;
374 }
375 if (changed || !jambuf_ok(buf)) {
376 llog(RC_LOG, logger,
377 "Warning: XAUTH username changed from '%s' to '%s'",
378 src, dst);
379 }
380}
381
382/*
383 * form the command string
384 *
385 * note: this mutates *st by calling get_sa_info().
386 */
387static void jam_common_shell_out(struct jambuf *buf, const struct connection *c,
388 const struct spd_route *sr, struct state *st,
389 bool inbytes, bool outbytes)
390{
391 ip_address ta;
392
393 char *id_vname = NULL;
394
395 if (c->xfrmi != NULL && c->xfrmi->name != NULL)
396 id_vname = c->xfrmi->name;
397 else
398 id_vname = "NULL";
399
400 jam(buf, "PLUTO_CONNECTION='%s' ", c->name);
401 jam(buf, "PLUTO_CONNECTION_TYPE='%s' ", LIN(POLICY_TUNNEL, c->policy) ? "tunnel" : "transport");
402 jam(buf, "PLUTO_VIRT_INTERFACE='%s' ", id_vname);
403 jam(buf, "PLUTO_INTERFACE='%s' ", c->interface == NULL ? "NULL" : c->interface->ip_dev->id_rname);
404 jam(buf, "PLUTO_XFRMI_ROUTE='%s' ", (c->xfrmi != NULL && c->xfrmi->if_id > 0) ? "yes" : "");
405
406 if (address_is_specified(sr->this.host_nexthop)) {
407 jam(buf, "PLUTO_NEXT_HOP='");
408 jam_address(buf, &sr->this.host_nexthop);
409 jam(buf, "' ");
410 }
411
412 ipstr_buf bme;
413 jam(buf, "PLUTO_ME='%s' ", ipstr(&sr->this.host_addr, &bme));
414
415 jam(buf, "PLUTO_MY_ID='");
416 jam_id_bytes(buf, &sr->this.id, jam_shell_quoted_bytes);
417 jam(buf, "' ");
418
419 jam(buf, "PLUTO_MY_CLIENT='");
420 jam_selector_subnet(buf, &sr->this.client);
421 jam(buf, "' ");
422
423 jam(buf, "PLUTO_MY_CLIENT_NET='");
424 ta = selector_prefix(sr->this.client);
425 jam_address(buf, &ta);
426 jam(buf, "' ");
427
428 jam(buf, "PLUTO_MY_CLIENT_MASK='");
429 ta = selector_prefix_mask(sr->this.client);
430 jam_address(buf, &ta);
431 jam(buf, "' ");
432
433 if (cidr_is_specified(sr->this.host_vtiip)) {
434 jam(buf, "VTI_IP='");
435 jam_cidr(buf, &sr->this.host_vtiip);
436 jam(buf, "' ");
437 }
438
439 if (cidr_is_specified(sr->this.ifaceip)) {
440 jam(buf, "INTERFACE_IP='");
441 jam_cidr(buf, &sr->this.ifaceip);
442 jam(buf, "' ");
443 }
444
445 jam(buf, "PLUTO_MY_PORT='%u' ", sr->this.port);
446 jam(buf, "PLUTO_MY_PROTOCOL='%u' ", sr->this.protocol);
447 jam(buf, "PLUTO_SA_REQID='%u' ", sr->reqid);
448 jam(buf, "PLUTO_SA_TYPE='%s' ", (st == NULL ? "none" :
449 st->st_esp.present ? "ESP" :
450 st->st_ah.present ? "AH" :
451 st->st_ipcomp.present ? "IPCOMP" :
452 "unknown?"));
453 ipstr_buf bpeer;
454 jam(buf, "PLUTO_PEER='%s' ", ipstr(&sr->that.host_addr, &bpeer));
455
456 jam(buf, "PLUTO_PEER_ID='");
457 jam_id_bytes(buf, &sr->that.id, jam_shell_quoted_bytes);
458 jam(buf, "' ");
459
460 /* for transport mode, things are complicated */
461 jam(buf, "PLUTO_PEER_CLIENT='");
462 if (!LIN(POLICY_TUNNEL, c->policy) && (st != NULL && LHAS(st->hidden_variables.st_nat_traversal, NATED_PEER))) {
463 jam(buf, "%s' ", ipstr(&sr->that.host_addr, &bpeer));
464 } else {
465 jam_selector_subnet(buf, &sr->that.client);
466 jam(buf, "' ");
467 }
468
469 jam(buf, "PLUTO_PEER_CLIENT_NET='");
470 if (!LIN(POLICY_TUNNEL, c->policy) && (st != NULL && LHAS(st->hidden_variables.st_nat_traversal, NATED_PEER))) {
471 jam(buf, "%s' ", ipstr(&sr->that.host_addr, &bpeer));
472 } else {
473 ta = selector_prefix(sr->that.client);
474 jam_address(buf, &ta);
475 jam(buf, "' ");
476 }
477
478 jam(buf, "PLUTO_PEER_CLIENT_MASK='");
479 ta = selector_prefix_mask(sr->that.client);
480 jam_address(buf, &ta);
481 jam(buf, "' ");
482
483 jam(buf, "PLUTO_PEER_PORT='%u' ", sr->that.port);
484 jam(buf, "PLUTO_PEER_PROTOCOL='%u' ", sr->that.protocol);
485
486 jam(buf, "PLUTO_PEER_CA='");
487 for (struct pubkey_list *p = pluto_pubkeys; p != NULL; p = p->next) {
488 struct pubkey *key = p->key;
489 int pathlen; /* value ignored */
490 if (key->type == &pubkey_type_rsa &&
491 same_id(&sr->that.id, &key->id) &&
492 trusted_ca_nss(key->issuer, sr->that.ca, &pathlen)) {
493 jam_dn_or_null(buf, key->issuer, "", jam_shell_quoted_bytes);
494 break;
495 }
496 }
497 jam(buf, "' ");
498
499 jam(buf, "PLUTO_STACK='%s' ", kernel_ops->kern_name);
500
501 if (c->metric != 0) {
502 jam(buf, "PLUTO_METRIC=%d ", c->metric);
503 }
504
505 if (c->connmtu != 0) {
506 jam(buf, "PLUTO_MTU=%d ", c->connmtu);
507 }
508
509 jam(buf, "PLUTO_ADDTIME='%" PRIu64 "' ", st == NULL ? (uint64_t)0 : st->st_esp.add_time);
510
511 jam(buf, "PLUTO_CONN_POLICY='");
512 jam_policy(buf, c->policy);
513 if (NEVER_NEGOTIATE(c->policy)) {
514 jam(buf, "+NEVER_NEGOTIATE");
515 }
516 jam(buf, "' ");
517
518 jam(buf, "PLUTO_CONN_KIND='");
519 jam_enum(buf, &connection_kind_names, c->kind);
520 jam(buf,"' ");
521
522 jam(buf, "PLUTO_CONN_ADDRFAMILY='ipv%d' ", address_type(&sr->this.host_addr)->ip_version);
523 jam(buf, "XAUTH_FAILED=%d ", (st != NULL && st->st_xauth_soft) ? 1 : 0);
524
525 if (st != NULL && st->st_xauth_username[0] != '\0') {
526 jam(buf, "PLUTO_USERNAME='");
527 jam_clean_xauth_username(buf, st->st_xauth_username, st->st_logger);
528 jam(buf, "' ");
529 }
530
531 if (address_is_specified(sr->this.host_srcip)) {
532 jam(buf, "PLUTO_MY_SOURCEIP='");
533 jam_address(buf, &sr->this.host_srcip);
534 jam(buf, "' ");
535 if (st != NULL)
536 jam(buf, "PLUTO_MOBIKE_EVENT='%s' ",
537 st->st_mobike_del_src_ip ? "yes" : "");
538 }
539
540 jam(buf, "PLUTO_IS_PEER_CISCO='%u' ", c->remotepeertype /* ??? kind of odd printing an enum with %u */);
541 jam(buf, "PLUTO_PEER_DNS_INFO='%s' ", (st != NULL && st->st_seen_cfg_dns != NULL) ? st->st_seen_cfg_dns : "");
542 jam(buf, "PLUTO_PEER_DOMAIN_INFO='%s' ", (st != NULL && st->st_seen_cfg_domains != NULL) ? st->st_seen_cfg_domains : "");
543 jam(buf, "PLUTO_PEER_BANNER='%s' ", (st != NULL && st->st_seen_cfg_banner != NULL) ? st->st_seen_cfg_banner : "");
544 jam(buf, "PLUTO_CFG_SERVER='%u' ", sr->this.modecfg_server);
545 jam(buf, "PLUTO_CFG_CLIENT='%u' ", sr->this.modecfg_client);
546#ifdef HAVE_NM
547 jam(buf, "PLUTO_NM_CONFIGURED='%u' ", c->nmconfigured);
548#endif
549
550 if (inbytes) {
551 jam(buf, "PLUTO_INBYTES='%" PRIu64 "' ",
552 st->st_esp.present ? st->st_esp.our_bytes :
553 st->st_ah.present ? st->st_ah.our_bytes :
554 st->st_ipcomp.present ? st->st_ipcomp.our_bytes :
555 0);
556 }
557 if (outbytes) {
558 jam(buf, "PLUTO_OUTBYTES='%" PRIu64 "' ",
559 st->st_esp.present ? st->st_esp.peer_bytes :
560 st->st_ah.present ? st->st_ah.peer_bytes :
561 st->st_ipcomp.present ? st->st_ipcomp.peer_bytes :
562 0);
563 }
564
565 if (c->nflog_group != 0) {
566 jam(buf, "NFLOG=%d ", c->nflog_group);
567 }
568
569 if (c->sa_marks.in.val != 0) {
570 jam(buf, "CONNMARK_IN=%" PRIu32 "/%#08" PRIx32 " ",
571 c->sa_marks.in.val, c->sa_marks.in.mask);
572 }
573 if (c->sa_marks.out.val != 0 && c->xfrmi == NULL) {
574 jam(buf, "CONNMARK_OUT=%" PRIu32 "/%#08" PRIx32 " ",
575 c->sa_marks.out.val, c->sa_marks.out.mask);
576 }
577 if (c->xfrmi != NULL) {
578 if (c->sa_marks.out.val != 0) {
579 /* user configured XFRMI_SET_MARK (a.k.a. output mark) add it */
580 jam(buf, "PLUTO_XFRMI_FWMARK='%" PRIu32 "/%#08" PRIx32 "' ",
581 c->sa_marks.out.val, c->sa_marks.out.mask);
582 } else if (address_in_selector_range(sr->that.host_addr, sr->that.client)) {
583 jam(buf, "PLUTO_XFRMI_FWMARK='%" PRIu32 "/0xffffffff' ",
584 c->xfrmi->if_id);
585 } else {
586 address_buf bpeer;
587 selector_buf peerclient_str;
588 dbg("not adding PLUTO_XFRMI_FWMARK. PLUTO_PEER=%s is not inside PLUTO_PEER_CLIENT=%s",
589 str_address(&sr->that.host_addr, &bpeer),
590 str_selector(&sr->that.client, &peerclient_str));
591 jam(buf, "PLUTO_XFRMI_FWMARK='' ");
592 }
593 }
594 jam(buf, "VTI_IFACE='%s' ", c->vti_iface ? c->vti_iface : "");
595 jam(buf, "VTI_ROUTING='%s' ", bool_str(c->vti_routing));
596 jam(buf, "VTI_SHARED='%s' ", bool_str(c->vti_shared));
597
598 if (sr->this.has_cat) {
599 jam(buf, "CAT='YES' ");
600 }
601
602 jam(buf, "SPI_IN=0x%x SPI_OUT=0x%x " /* SPI_IN SPI_OUT */,
603 (st == NULL ? 0 : st->st_esp.present ? ntohl(st->st_esp.attrs.spi) :
604 st->st_ah.present ? ntohl(st->st_ah.attrs.spi) :
605 st->st_ipcomp.present ? ntohl(st->st_ipcomp.attrs.spi) : 0),
606 (st == NULL ? 0 : st->st_esp.present ? ntohl(st->st_esp.our_spi) :
607 st->st_ah.present ? ntohl(st->st_ah.our_spi) :
608 st->st_ipcomp.present ? ntohl(st->st_ipcomp.our_spi) : 0));
609}
610
611/*
612 * form the command string
613 *
614 * note: this mutates *st by calling fmt_traffic_str
615 */
616bool fmt_common_shell_out(char *buf, size_t blen, const struct connection *c,
617 const struct spd_route *sr, struct state *st)
618{
619 /*
620 * note: this mutates *st by calling get_sa_info
621 *
622 * XXX: does the get_sa_info() call order matter? Should this
623 * be a single "atomic" call?
624 *
625 * true==inbound: inbound updates OUR_BYTES; !inbound updates
626 * PEER_BYTES.
627 */
628 bool outbytes = st != NULL && get_sa_info(st, false, NULL);
629 bool inbytes = st != NULL && get_sa_info(st, true, NULL);
630 struct jambuf jambuf = array_as_jambuf(buf, blen);
631 jam_common_shell_out(&jambuf, c, sr, st, inbytes, outbytes);
632 return jambuf_ok(&jambuf);
633}
634
635bool do_command(const struct connection *c,
636 const struct spd_route *sr,
637 const char *verb,
638 struct state *st,
639 /* either st, or c's logger */
640 struct logger *logger)
641{
642 const char *verb_suffix;
643
644 /*
645 * Support for skipping updown, eg leftupdown=""
646 * Useful on busy servers that do not need to use updown for anything
647 */
648 if (sr->this.updown == NULL || streq(sr->this.updown, "%disabled")) {
649 dbg("kernel: skipped updown %s command - disabled per policy", verb);
650 return true;
651 }
652 dbg("kernel: running updown command \"%s\" for verb %s ", sr->this.updown, verb);
653
654 /*
655 * Figure out which verb suffix applies.
656 * NOTE: this is a duplicate of code in mast_do_command_vs.
657 */
658 {
659 const char *hs, *cs;
660
661 switch (addrtypeof(&sr->this.host_addr)) {
662 case AF_INET:
663 hs = "-host";
664 cs = "-client";
665 break;
666 case AF_INET6:
667 hs = "-host-v6";
668 cs = "-client-v6";
669 break;
670 default:
671 llog(RC_LOG_SERIOUS, logger, "unknown address family");
672 return false;
673 }
674 verb_suffix = selector_range_eq_address(sr->this.client, sr->this.host_addr) ? hs : cs;
675 }
676
677 dbg("kernel: command executing %s%s", verb, verb_suffix);
678
679 char common_shell_out_str[2048];
680 if (!fmt_common_shell_out(common_shell_out_str,
681 sizeof(common_shell_out_str), c, sr,
682 st)) {
683 llog(RC_LOG_SERIOUS, logger,
684 "%s%s command too long!", verb,
685 verb_suffix);
687 return false;
687 }
688
689 /* must free */
690 char *cmd = alloc_printf("2>&1 " /* capture stderr along with stdout */
691 "PLUTO_VERB='%s%s' "
692 "%s" /* other stuff */
693 "%s", /* actual script */
694 verb, verb_suffix,
695 common_shell_out_str,
696 sr->this.updown);
697 if (cmd == NULL) {
698 llog(RC_LOG_SERIOUS, logger,
699 "%s%s command too long!", verb,
700 verb_suffix);
701 return false;
702 }
703
704 bool ok = invoke_command(verb, verb_suffix, cmd, logger);
705 pfree(cmd);
706 return ok;
707}
708
709bool invoke_command(const char *verb, const char *verb_suffix, const char *cmd,
710 struct logger *logger)
711{
712# define CHUNK_WIDTH 80 /* units for cmd logging */
713 if (DBGP(DBG_BASE)) {
714 int slen = strlen(cmd);
715 int i;
716
717 DBG_log("executing %s%s: %s",
718 verb, verb_suffix, cmd);
719 DBG_log("popen cmd is %d chars long", slen);
720 for (i = 0; i < slen; i += CHUNK_WIDTH)
721 DBG_log("cmd(%4d):%.*s:", i,
722 slen-i < CHUNK_WIDTH? slen-i : CHUNK_WIDTH,
723 &cmd[i]);
724 }
725# undef CHUNK_WIDTH
726
727
728 {
729 /*
730 * invoke the script, catching stderr and stdout
731 * It may be of concern that some file descriptors will
732 * be inherited. For the ones under our control, we
733 * have done fcntl(fd, F_SETFD, FD_CLOEXEC) to prevent this.
734 * Any used by library routines (perhaps the resolver or
735 * syslog) will remain.
736 */
737 FILE *f = popen(cmd, "r");
738
739 if (f == NULL) {
740#ifdef HAVE_BROKEN_POPEN
741 /*
742 * See bug #1067 Angstrom Linux on a arm7 has no
743 * popen()
744 */
745 if (errno == ENOSYS) {
746 /*
747 * Try system(), though it will not give us
748 * output
749 */
750 DBG_log("unable to popen(), falling back to system()");
751 system(cmd);
752 return true;
753 }
754#endif
755 llog(RC_LOG_SERIOUS, logger,
756 "unable to popen %s%s command",
757 verb, verb_suffix);
758 return false;
759 }
760
761 /* log any output */
762 for (;; ) {
763 /*
764 * if response doesn't fit in this buffer, it will
765 * be folded
766 */
767 char resp[256];
768
769 if (fgets(resp, sizeof(resp), f) == NULL) {
770 if (ferror(f)) {
771 log_errno(logger, errno,
772 "fgets failed on output of %s%s command",
773 verb, verb_suffix);
774 pclose(f);
775 return false;
776 } else {
777 passert(feof(f));
778 break;
779 }
780 } else {
781 char *e = resp + strlen(resp);
782
783 if (e > resp && e[-1] == '\n')
784 e[-1] = '\0'; /* trim trailing '\n' */
785 llog(RC_LOG, logger, "%s%s output: %s", verb,
786 verb_suffix, resp);
787 }
788 }
789
790 /* report on and react to return code */
791 {
792 int r = pclose(f);
793
794 if (r == -1) {
795 log_errno(logger, errno,
796 "pclose failed for %s%s command",
797 verb, verb_suffix);
798 return false;
799 } else if (WIFEXITED(r)) {
800 if (WEXITSTATUS(r) != 0) {
801 llog(RC_LOG_SERIOUS, logger,
802 "%s%s command exited with status %d",
803 verb, verb_suffix,
804 WEXITSTATUS(r));
805 return false;
806 }
807 } else if (WIFSIGNALED(r)) {
808 llog(RC_LOG_SERIOUS, logger,
809 "%s%s command exited with signal %d",
810 verb, verb_suffix, WTERMSIG(r));
811 return false;
812 } else {
813 llog(RC_LOG_SERIOUS, logger,
814 "%s%s command exited with unknown status %d",
815 verb, verb_suffix, r);
816 return false;
817 }
818 }
819 }
820 return true;
821}
822
823/*
824 * Build an array of encapsulation rules/tmpl. Order things
825 * inner-most to outer-most so the last entry is what will go across
826 * the wire. A -1 entry of the packet to be encapsulated is implied.
827 */
828
829static struct kernel_encap kernel_encap_from_spd(lset_t policy,
830 const struct spd_route *spd,
831 enum encap_mode mode)
832{
833 struct kernel_encap encap = {
834 .mode = mode,
835 };
836
837 /*
838 * XXX: remember construct this inner-to-outer; which is the
839 * same as the kernel_sa array.
840 */
841
842 struct encap_rule *outer = encap.rule - 1;
843 if (policy & POLICY_COMPRESS) {
844 outer++;
845 outer->reqid = reqid_ipcomp(spd->reqid);
846 outer->proto = ENCAP_PROTO_IPCOMP;
847 }
848 if (policy & POLICY_ENCRYPT) {
849 outer++;
850 outer->reqid = reqid_esp(spd->reqid);
851 outer->proto = ENCAP_PROTO_ESP;
852 }
853 if (policy & POLICY_AUTHENTICATE) {
854 outer++;
855 outer->reqid = reqid_ah(spd->reqid);
856 outer->proto = ENCAP_PROTO_AH;
857 }
858
859 passert(outer < encap.rule + elemsof(encap.rule));
860 encap.outer = outer - encap.rule; /* could be -1 */
861 passert(encap.outer < (int)elemsof(encap.rule));
862
863 /*
864 * XXX: Inner here refers to the inner-most rule which, for a
865 * tunnel, needs the tunnel bit set. For transport, why it
866 * uses outer remains a mystery (suspect it just needs to be
867 * !INT !IPIP).
868 */
869 if (outer >= encap.rule) {
870 encap.inner_proto = (mode == ENCAP_MODE_TUNNEL ? &ip_protocol_ipip :
871 mode == ENCAP_MODE_TRANSPORT ? protocol_by_ipproto(outer->proto) :
872 NULL);
873 pexpect(encap.inner_proto != NULL);
874 }
875
876 return encap;
877}
878
879static struct kernel_encap kernel_encap_from_state(const struct state *st,
880 const struct spd_route *spd)
881{
882 bool tunnel = false;
883 lset_t policy = LEMPTY;
884 if (st->st_ipcomp.present) {
885 policy |= POLICY_COMPRESS;
886 tunnel |= (st->st_ipcomp.attrs.mode == ENCAPSULATION_MODE_TUNNEL);
887 }
888
889 if (st->st_esp.present) {
890 policy |= POLICY_ENCRYPT;
891 tunnel |= (st->st_esp.attrs.mode == ENCAPSULATION_MODE_TUNNEL);
892 }
893
894 if (st->st_ah.present) {
895 policy |= POLICY_AUTHENTICATE;
896 tunnel |= (st->st_ah.attrs.mode == ENCAPSULATION_MODE_TUNNEL);
897 }
898
899 enum encap_mode mode = (tunnel ? ENCAP_MODE_TUNNEL : ENCAP_MODE_TRANSPORT);
900 struct kernel_encap encap = kernel_encap_from_spd(policy, spd, mode);
901 return encap;
902}
903
904static struct kernel_route kernel_route_from_spd(const struct spd_route *spd,
905 enum encap_mode mode,
906 enum encap_direction flow)
907{
908 /*
909 * With pfkey and transport mode with nat-traversal we need to
910 * change the remote IPsec SA to point to external ip of the
911 * peer. Here we substitute real client ip with NATD ip.
912 *
913 * Bug #1004 fix.
914 *
915 * There really isn't "client" with XFRM and transport mode so
916 * eroute must be done to natted, visible ip. If we don't hide
917 * internal IP, communication doesn't work.
918 */
919 ip_selector remote_client;
920 switch (mode) {
921 case ENCAP_MODE_TUNNEL:
922 remote_client = spd->that.client;
923 break;
924 case ENCAP_MODE_TRANSPORT:
925 remote_client = selector_from_address_protocol_port(spd->that.host_addr,
926 protocol_by_ipproto(spd->that.protocol),
927 selector_port(spd->that.client));
928 break;
929 default:
930 bad_case(mode);
931 }
932 selector_buf os, ns;
933 dbg("%s() changing remote selector %s to %s",
934 __func__,
935 str_selector(&spd->that.client, &os),
936 str_selector(&remote_client, &ns));
937
938 struct kernel_route route = {0};
939 struct route_end *local;
940 struct route_end *remote;
941
942 switch (flow) {
943 case ENCAP_DIRECTION_INBOUND:
944 remote = &route.src;
945 local = &route.dst;
946 break;
947 case ENCAP_DIRECTION_OUTBOUND:
948 local = &route.src;
949 remote = &route.dst;
950 break;
951 default:
952 bad_case(flow);
953 }
954
955 local->client = spd->this.client;
956 remote->client = remote_client;
957 local->host_addr = spd->this.host_addr;
958 remote->host_addr = spd->that.host_addr;
959
960 return route;
961}
962
963/*
964 * handle co-terminal attempt of the "near" kind
965 *
966 * Note: it mutates both inside and outside
967 */
968
969enum routability {
970 route_impossible,
971 route_easy,
972 route_nearconflict,
973 route_farconflict,
974 route_unnecessary
975};
976
977static enum routability note_nearconflict(struct connection *outside, /* CK_PERMANENT */
978 struct connection *inside, /* CK_TEMPLATE */
979 struct logger *logger)
980{
981 /*
982 * this is a co-terminal attempt of the "near" kind.
983 * when chaining, we chain from inside to outside
984 *
985 * XXX permit multiple deep connections?
986 */
987 passert(inside->policy_next == NULL);
988
989 inside->policy_next = outside;
990
991 /*
992 * since we are going to steal the eroute from the secondary
993 * policy, we need to make sure that it no longer thinks that
994 * it owns the eroute.
995 */
996 outside->spd.eroute_owner = SOS_NOBODY;
997 outside->spd.routing = RT_UNROUTED_KEYED;
998
999 /*
1000 * set the priority of the new eroute owner to be higher
1001 * than that of the current eroute owner
1002 */
1003 inside->policy_prio = outside->policy_prio + 1;
1004
1005 connection_buf inst;
1006 llog(RC_LOG_SERIOUS, logger,
1007 "conflict on eroute (%s), switching eroute to %s and linking %s",
1008 str_connection_instance(inside, &inst),
1009 inside->name, outside->name);
1010
1011 return route_nearconflict;
1012}
1013
1014/*
1015 * Note: this may mutate c
1016 */
1017static enum routability could_route(struct connection *c, struct logger *logger)
1018{
1019 esb_buf b;
1020 dbg("kernel: could_route called for %s; kind=%s that.has_client=%s oppo=%s this.host_port=%u sec_label="PRI_SHUNK,
1021 c->name,
1022 enum_show(&connection_kind_names, c->kind, &b),
1023 bool_str(c->spd.that.has_client),
1024 bool_str(c->policy & POLICY_OPPORTUNISTIC),
1025 c->spd.this.host_port,
1026 pri_shunk(c->config->sec_label));
1027
1028 /* it makes no sense to route a connection that is ISAKMP-only */
1029 if (!NEVER_NEGOTIATE(c->policy) && !HAS_IPSEC_POLICY(c->policy)) {
1030 llog(RC_ROUTE, logger,
1031 "cannot route an ISAKMP-only connection");
1032 return route_impossible;
1033 }
1034
1035 /*
1036 * if this is a transport SA, and overlapping SAs are supported, then
1037 * this route is not necessary at all.
1038 */
1039 if (kernel_ops->overlap_supported && !LIN(POLICY_TUNNEL, c->policy))
1040 return route_unnecessary;
1041
1042 /*
1043 * If this is a template connection, we cannot route.
1044 * However, opportunistic and sec_label templates can be
1045 * routed (as in install the policy).
1046 */
1047 if (!c->spd.that.has_client &&
1048 c->kind == CK_TEMPLATE &&
1049 !(c->policy & POLICY_OPPORTUNISTIC) &&
1050 c->config->sec_label.len == 0) {
1051 policy_buf pb;
1052 llog(RC_ROUTE, logger,
1053 "cannot route template policy of %s",
1054 str_policy(c->policy, &pb));
1055 return route_impossible;
1056 }
1057
1058 struct spd_route *esr, *rosr;
1059 struct connection *ero, /* who, if anyone, owns our eroute? */
1060 *ro = route_owner(c, &c->spd, &rosr, &ero, &esr); /* who owns our route? */
1061
1062 /*
1063 * If there is already a route for peer's client subnet
1064 * and it disagrees about interface or nexthop, we cannot steal it.
1065 * Note: if this connection is already routed (perhaps for another
1066 * state object), the route will agree.
1067 * This is as it should be -- it will arise during rekeying.
1068 */
1069 if (ro != NULL && !routes_agree(ro, c)) {
1070
1071 if (!compatible_overlapping_connections(c, ero)) {
1072 /*
1073 * Another connection is already using the eroute.
1074 * TODO: XFRM supports this. For now, only allow this for OE
1075 */
1076 if ((c->policy & POLICY_OPPORTUNISTIC) == LEMPTY) {
1077 connection_buf cib;
1078 llog(RC_LOG_SERIOUS, logger,
1079 "cannot route -- route already in use for "PRI_CONNECTION"",
1080 pri_connection(ro, &cib));
1081 return route_impossible;
1082 } else {
1083 connection_buf cib;
1084 llog(RC_LOG_SERIOUS, logger,
1085 "cannot route -- route already in use for "PRI_CONNECTION" - but allowing anyway",
1086 pri_connection(ro, &cib));
1087 }
1088 }
1089 }
1090
1091
1092 /* if there is an eroute for another connection, there is a problem */
1093 if (ero != NULL && ero != c) {
1094 /*
1095 * note, wavesec (PERMANENT) goes *outside* and
1096 * OE goes *inside* (TEMPLATE)
1097 */
1098 if (ero->kind == CK_PERMANENT &&
1099 c->kind == CK_TEMPLATE) {
1100 return note_nearconflict(ero, c, logger);
1101 } else if (c->kind == CK_PERMANENT &&
1102 ero->kind == CK_TEMPLATE) {
1103 return note_nearconflict(c, ero, logger);
1104 }
1105
1106 /* look along the chain of policies for one with the same name */
1107
1108 for (struct connection *ep = ero; ep != NULL; ep = ero->policy_next) {
1109 if (ep->kind == CK_TEMPLATE &&
1110 streq(ep->name, c->name))
1111 return route_easy;
1112 }
1113
1114 /*
1115 * If we fell off the end of the list, then we found no
1116 * TEMPLATE so there must be a conflict that we can't resolve.
1117 * As the names are not equal, then we aren't
1118 * replacing/rekeying.
1119 *
1120 * ??? should there not be a conflict if ANYTHING in the list,
1121 * other than c, conflicts with c?
1122 */
1123
1124 if (LDISJOINT(POLICY_OVERLAPIP, c->policy | ero->policy) && c->config->sec_label.len == 0) {
1125 /*
1126 * another connection is already using the eroute,
1127 * TODO: XFRM apparently can do this though
1128 */
1129 connection_buf erob;
1130 llog(RC_LOG_SERIOUS, logger,
1131 "cannot install eroute -- it is in use for "PRI_CONNECTION" #%lu",
1132 pri_connection(ero, &erob), esr->eroute_owner);
1133 return route_impossible;
1134 }
1135
1136 connection_buf erob;
1137 dbg("kernel: overlapping permitted with "PRI_CONNECTION" #%lu",
1138 pri_connection(ero, &erob), esr->eroute_owner);
1139 }
1140 return route_easy;
1141}
1142
1143bool trap_connection(struct connection *c)
1144{
1145 enum routability r = could_route(c, c->logger);
1146
1147 switch (r) {
1148 case route_impossible:
1149 return false;
1150
1151 case route_easy:
1152 case route_nearconflict:
1153 if (c->ike_version == IKEv2 && c->config->sec_label.len > 0) {
1154 /*
1155 * IKEv2 security labels are treated
1156 * specially: this allocates and installs a
1157 * full REQID, the route_and_eroute() call
1158 * does not (and who knows what else it does).
1159 */
1160 dbg("kernel: installing SE trap policy");
1161 return install_se_connection_policies(c, c->logger);
1162 } else if (c->spd.routing >= RT_ROUTED_TUNNEL) {
1163 /*
1164 * RT_ROUTED_TUNNEL is treated specially: we
1165 * don't override because we don't want to
1166 * lose track of the IPSEC_SAs etc.
1167 *
1168 * ??? The test treats RT_UNROUTED_KEYED
1169 * specially too.
1170 *
1171 * XXX: ah, I was wondering ...
1172 */
1173 dbg("kernel: skipping trap policy as >=ROUTED_TUNNEL");
1174 return true;
1175 } else {
1176 return route_and_eroute(c, &c->spd, NULL, c->logger);
1177 }
1178
1179 case route_farconflict:
1180 return false;
1181
1182 case route_unnecessary:
1183 return true;
1184
1185 default:
1186 bad_case(r);
1187 }
1188}
1189
1190/*
1191 * Add/replace/delete a shunt eroute.
1192 *
1193 * Such an eroute determines the fate of packets without the use
1194 * of any SAs. These are defaults, in effect.
1195 * If a negotiation has not been attempted, use %trap.
1196 * If negotiation has failed, the choice between %trap/%pass/%drop/%reject
1197 * is specified in the policy of connection c.
1198 */
1199
1200bool shunt_policy(enum kernel_policy_op op,
1201 const struct connection *c,
1202 const struct spd_route *sr,
1203 enum routing_t rt_kind,
1204 const char *what,
1205 struct logger *logger)
1206{
1207 LSWDBGP(DBG_BASE, buf) {
1208 jam(buf, "kernel: %s() %s %s",
1209 __func__, enum_name_short(&kernel_policy_op_names, op), what);
1210
1211 jam(buf, " ");
1212 jam_connection(buf, c);
1213
1214 jam(buf, " for rt_kind '%s' using",
1215 enum_name(&routing_story, rt_kind));
1216
1217 jam(buf, " ");
1218 jam_selector(buf, &sr->this.client);
1219 jam(buf, "-%s->", protocol_by_ipproto(sr->this.protocol)->name);
1220 jam_selector(buf, &sr->that.client);
1221
1222 jam(buf, " sec_label=");
1223 if (sr->this.sec_label.len > 0) {
1224 jam_sanitized_hunk(buf, sr->this.sec_label);
1225 jam(buf, " (this)");
1226#if 0
1227 } else if (c->config->sec_label.len > 0) {
1228 jam_sanitized_hunk(buf, c->config->sec_label);
1229 jam(buf, " (config)");
1230#endif
1231 }
1232 }
1233
1234 bool ok = kernel_ops->shunt_policy(op, c, sr, rt_kind, what, logger);
1235 dbg("kernel: %s() returned %s", __func__, bool_str(ok));
1236 return ok;
1237}
1238
1239static bool sag_eroute(const struct state *st,
1240 const struct spd_route *sr,
1241 enum kernel_policy_op op,
1242 const char *opname)
1243{
1244 struct connection *c = st->st_connection;
1245
1246 /*
1247 * Figure out the SPI and protocol (in two forms) for the
1248 * outer transformation.
1249 */
1250
1251 const struct kernel_encap encap = kernel_encap_from_state(st, sr);
1252 /* check for no transform at all */
1253 passert(encap.outer >= 0);
1254
1255 uint32_t xfrm_if_id = c->xfrmi != NULL ? c->xfrmi->if_id : 0;
1256
1257 pexpect((op & KERNEL_POLICY_DIR_MASK) == KERNEL_POLICY_DIR_OUT)({ _Bool assertion__ = (op & KERNEL_POLICY_DIR_MASK) == KERNEL_POLICY_DIR_OUT
; if (!assertion__) { where_t here_ = ({ static const struct where
here = { .func = __func__, .file = "programs/pluto/kernel.c"
, .line = 1257, }; &here; }); const struct logger *logger_
= &failsafe_logger; llog_pexpect(logger_, here_, "%s", "(op & KERNEL_POLICY_DIR_MASK) == KERNEL_POLICY_DIR_OUT"
); } assertion__; })
;
1258 struct kernel_route route = kernel_route_from_spd(sr, encap.mode,
1259 ENCAP_DIRECTION_OUTBOUND);
1260
1261 /* hack */
1262 char why[256];
1263 snprintf(why, sizeof(why), "%s() %s", __func__, opname);
1264
1265 return eroute_connection(op, why, sr, ntohl(SPI_IGNORE), ntohl(SPI_IGNORE),
1266 &route, encap.inner_proto->ipproto, &encap,
1267 calculate_sa_prio(c, false), &c->sa_marks,
1268 xfrm_if_id,
1269 HUNK_AS_SHUNK(c->config->sec_label),
1270 st->st_logger);
1271}
1272
1273void migration_up(struct connection *c, struct state *st)
1274{
1275 for (struct spd_route *sr = &c->spd; sr != NULL; sr = sr->spd_next) {
1276#ifdef IPSEC_CONNECTION_LIMIT
1277 num_ipsec_eroute++;
1278#endif
1279 sr->routing = RT_ROUTED_TUNNEL; /* do now so route_owner won't find us */
1280 do_command(c, sr, "up", st, st->st_logger);
1281 do_command(c, sr, "route", st, st->st_logger);
1282 }
1283}
1284
1285void migration_down(struct connection *c, struct state *st)
1286{
1287 for (struct spd_route *sr = &c->spd; sr != NULL; sr = sr->spd_next) {
1288 enum routing_t cr = sr->routing;
1289
1290#ifdef IPSEC_CONNECTION_LIMIT
1291 if (erouted(cr))
1292 num_ipsec_eroute--;
1293#endif
1294
1295 sr->routing = RT_UNROUTED; /* do now so route_owner won't find us */
1296
1297 /* only unroute if no other connection shares it */
1298 if (routed(cr) && route_owner(c, sr, NULL, NULL, NULL) == NULL) {
1299 do_command(c, sr, "down", st, st->st_logger);
1300 st->st_mobike_del_src_ip = true;
1301 do_command(c, sr, "unroute", st, st->st_logger);
1302 st->st_mobike_del_src_ip = false;
1303 }
1304 }
1305}
1306
1307
1308/*
1309 * Delete any eroute for a connection and unroute it if route isn't
1310 * shared.
1311 */
1312void unroute_connection(struct connection *c)
1313{
1314 for (struct spd_route *sr = &c->spd; sr != NULL; sr = sr->spd_next) {
1315 enum routing_t cr = sr->routing;
1316
1317 if (erouted(cr)) {
1318 /* cannot handle a live one */
1319 passert(cr != RT_ROUTED_TUNNEL);
1320 shunt_policy(KP_DELETE_OUTBOUND, c, sr, RT_UNROUTED,
1321 "unrouting connection",
1322 c->logger);
1323#ifdef IPSEC_CONNECTION_LIMIT
1324 num_ipsec_eroute--;
1325#endif
1326 }
1327
1328 sr->routing = RT_UNROUTED; /* do now so route_owner won't find us */
1329
1330 /* only unroute if no other connection shares it */
1331 if (routed(cr) && route_owner(c, sr, NULL, NULL, NULL) == NULL) {
1332 do_command(c, sr, "unroute", NULL, c->logger);
1333 }
1334 }
1335}
1336
1337#include "kernel_alg.h"
1338
1339/* find an entry in the bare_shunt table.
1340 * Trick: return a pointer to the pointer to the entry;
1341 * this allows the entry to be deleted.
1342 */
1343struct bare_shunt **bare_shunt_ptr(const ip_selector *our_client,
1344 const ip_selector *peer_client,
1345 int transport_proto,
1346 const char *why)
1347
1348{
1349 selectors_buf sb;
1350 dbg("kernel: %s looking for %s (%d)",
1351 why, str_selectors(our_client, peer_client, &sb),
1352 transport_proto);
1353#if 0
1354 /* XXX: transport_proto is redundant */
1355 pexpect(selector_protocol(our_client)->ipproto == (unsigned)transport_proto);
1356 pexpect(selector_protocol(peer_client)->ipproto == (unsigned)transport_proto);
1357#endif
1358 for (struct bare_shunt **pp = &bare_shunts; *pp != NULL; pp = &(*pp)->next) {
1359 struct bare_shunt *p = *pp;
1360 dbg_bare_shunt("comparing", p);
1361 if (transport_proto == p->transport_proto &&
1362 selector_range_eq_selector_range(*our_client, p->our_client) &&
1363 selector_range_eq_selector_range(*peer_client, p->peer_client)) {
1364 return pp;
1365 }
1366 }
1367 return NULL;
1368}
1369
1370/* free a bare_shunt entry, given a pointer to the pointer */
1371static void free_bare_shunt(struct bare_shunt **pp)
1372{
1373 struct bare_shunt *p;
1374
1375 passert(pp != NULL);
1376
1377 p = *pp;
1378
1379 *pp = p->next;
1380 dbg_bare_shunt("delete", p);
1381 pfree(p);
1382}
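/*
 * [Editorial illustration, not part of kernel.c] bare_shunt_ptr() and
 * free_bare_shunt() use the classic pointer-to-pointer idiom: by returning
 * the address of the link that points at the entry, the caller can unlink
 * it without caring whether that link is the list head or a ->next field.
 * A minimal standalone sketch of the same idiom (hypothetical names):
 *
 *     struct node { int key; struct node *next; };
 *
 *     static struct node **find_link(struct node **head, int key)
 *     {
 *             for (struct node **pp = head; *pp != NULL; pp = &(*pp)->next)
 *                     if ((*pp)->key == key)
 *                             return pp;   // the link pointing at the match
 *             return NULL;
 *     }
 *
 *     static void unlink_node(struct node **pp)
 *     {
 *             struct node *p = *pp;
 *             *pp = p->next;               // bypass the entry, as free_bare_shunt() does
 *             free(p);
 *     }
 */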
1383
1384unsigned shunt_count(void)
1385{
1386 unsigned i = 0;
1387
1388 for (const struct bare_shunt *bs = bare_shunts; bs != NULL; bs = bs->next)
1389 {
1390 i++;
1391 }
1392
1393 return i;
1394}
1395
1396void show_shunt_status(struct show *s)
1397{
1398 show_separator(s);
1399 show_comment(s, "Bare Shunt list:");
1400 show_separator(s);
1401
1402 for (const struct bare_shunt *bs = bare_shunts; bs != NULL; bs = bs->next) {
1403 /* Print interesting fields. Ignore count and last_active. */
1404 selector_buf ourb;
1405 selector_buf peerb;
1406 said_buf sat;
1407 policy_prio_buf prio;
1408
1409 show_comment(s, "%s -%d-> %s => %s %s %s",
1410 str_selector(&(bs)->our_client, &ourb),
1411 bs->transport_proto,
1412 str_selector(&(bs)->peer_client, &peerb),
1413 str_said(&(bs)->said, &sat),
1414 str_policy_prio(bs->policy_prio, &prio),
1415 bs->why);
1416 }
1417}
1418
1419/*
1420 * Clear any bare shunt holds that overlap with the network we have
1421 * just routed. We only consider "narrow" holds: ones for a single
1422 * address to single address.
1423 */
1424static void clear_narrow_holds(const ip_selector *our_client,
1425 const ip_selector *peer_client,
1426 int transport_proto,
1427 struct logger *logger)
1428{
1429 struct bare_shunt *p, **pp;
1430
1431 for (pp = &bare_shunts; (p = *pp) != NULL; ) {
1432 /*
1433 * is p->{local,remote} within {local,remote}.
1434 */
1435 if (p->said.spi == htonl(SPI_HOLD) &&
1436 transport_proto == p->transport_proto &&
1437 selector_in_selector(p->our_client, *our_client) &&
1438 selector_in_selector(p->peer_client, *peer_client)) {
1439 ip_address our_addr = selector_prefix(p->our_client);
1440 ip_address peer_addr = selector_prefix(p->peer_client);
1441 if (!delete_bare_shunt(&our_addr, &peer_addr,
1442 transport_proto, SPI_HOLD,
1443 /*skip_xfrm_policy_delete?*/false,
1444 "clear_narrow_holds() removing clashing narrow hold",
1445 logger)) {
1446 /* ??? we could not delete a bare shunt */
1447 llog_bare_shunt(RC_LOG, logger, p, "failed to delete");
1448 break; /* unlikely to succeed a second time */
1449 } else if (*pp == p) {
1450 /*
1451 * ??? We deleted the wrong bare shunt!
1452 * This happened because more than one entry
1453 * matched and we happened to delete a
1454 * different one.
1455 * Log it! And keep deleting.
1456 */
1457 llog_bare_shunt(RC_LOG, logger, p, "UNEXPECTEDLY SURVIVING");
1458 pp = &bare_shunts; /* just in case, start over */
1459 }
1460 /*
1461 * ??? if we were sure that there could only be one
1462 * matching entry, we could break out of the FOR.
1463 * For an unknown reason this is not always the case,
1464 * so we will continue the loop, with pp unchanged.
1465 */
1466 } else {
1467 pp = &p->next;
1468 }
1469 }
1470}
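/*
 * [Editorial note] "Narrow" above means the bare shunt's selectors cover a
 * single address on each side (the per-flow %hold installed on an acquire).
 * For example, assuming a hold for 10.0.1.7/32 -> 10.0.2.9/32 with a matching
 * protocol, routing the broader 10.0.1.0/24 -> 10.0.2.0/24 policy makes
 * selector_in_selector() true for both ends, so the clashing hold is deleted.
 * The addresses are illustrative only.
 */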
1471
1472bool delete_bare_shunt(const ip_address *src_address,
1473 const ip_address *dst_address,
1474 int transport_proto, ipsec_spi_t cur_shunt_spi,
1475 bool skip_xfrm_policy_delete,
1476 const char *why, struct logger *logger)
1477{
1478 const struct ip_info *afi = address_type(src_address);
1479 pexpect(afi == address_type(dst_address));
1480 const ip_protocol *protocol = protocol_by_ipproto(transport_proto);
1481 /* port? assumed wide? */
1482 ip_selector src = selector_from_address_protocol(*src_address, protocol);
1483 ip_selector dst = selector_from_address_protocol(*dst_address, protocol);
1484
1485 bool ok;
1486 if (kernel_ops->type == USE_XFRM && skip_xfrm_policy_delete) {
1487 selectors_buf sb;
1488 llog(RC_LOG, logger, "deleting bare shunt %s from pluto shunt table",
1489 str_selectors_sensitive(&src, &dst, &sb));
1490 ok = true; /* always succeed */
1491 } else {
1492 selectors_buf sb;
1493 dbg("kernel: deleting bare shunt %s from kernel for %s",
1494 str_selectors(&src, &dst, &sb), why);
1495 const ip_address null_host = afi->address.any;
1496 /* assume low code logged action */
1497 ok = raw_policy(KP_DELETE_OUTBOUND,
1498 &null_host, &src, &null_host, &dst,
1499 htonl(cur_shunt_spi), htonl(SPI_PASS),
1500 transport_proto,
1501 ET_INT, esp_transport_proto_info,
1502 deltatime(SHUNT_PATIENCE),
1503 0, /* we don't know connection for priority yet */
1504 NULL, /* sa_marks */
1505 0 /* xfrm interface id */,
1506 null_shunk, logger,
1507 "%s() %s", __func__, why);
1508 if (!ok) {
1509 /* did/should kernel log this? */
1510 selectors_buf sb;
1511 llog(RC_LOG, logger,
1512 "delete kernel shunt %s failed - deleting from pluto shunt table",
1513 str_selectors_sensitive(&src, &dst, &sb));
1514 }
1515 }
1516
1517 /*
1518 * We can have proto mismatching acquires with xfrm - this is
1519 * a bad workaround.
1520 *
1521 * ??? what is the nature of those mismatching acquires?
1522 *
1523 * XXX: for instance, when whack initiates an OE connection.
1524 * There is no kernel-acquire shunt to remove.
1525 */
1526
1527 struct bare_shunt **bs_pp = bare_shunt_ptr(&src, &dst, transport_proto, why);
1528 if (bs_pp == NULL) {
1529 selectors_buf sb;
1530 llog(RC_LOG, logger,
1531 "can't find expected bare shunt to delete: %s",
1532 str_selectors_sensitive(&src, &dst, &sb));
1533 return ok;
1534 }
1535
1536 free_bare_shunt(bs_pp);
1537 return ok;
1538}
1539
1540bool install_se_connection_policies(struct connection *c, struct logger *logger)
1541{
1542 connection_buf cb;
1543 dbg("kernel: %s() "PRI_CO" "PRI_CO" "PRI_CONNECTION" routed %s sec_label="PRI_SHUNK,
1544 __func__, pri_co(c->serialno), pri_co(c->serial_from),
1545 pri_connection(c, &cb),
1546 enum_name(&routing_story, c->spd.routing),
1547 pri_shunk(c->config->sec_label));
1548
1549 if (!pexpect(c->ike_version == IKEv2) ||
1550 !pexpect(c->config->sec_label.len > 0) ||
1551 !pexpect(c->kind == CK_TEMPLATE)) {
1552 return false;
1553 }
1554
1555 if (c->spd.routing != RT_UNROUTED) {
1556 dbg("kernel: %s() connection already routed", __func__);
1557 return true;
1558 }
1559
1560 enum encap_mode mode = (c->policy & POLICY_TUNNEL) ? ENCAP_MODE_TUNNEL : ENCAP_MODE_TRANSPORT;
1561 const struct kernel_encap encap = kernel_encap_from_spd(c->policy, &c->spd, mode);
1562 if (encap.outer < 0) {
1563 /* XXX: log? */
1564 return false;
1565 }
1566
1567 uint32_t priority = calculate_sa_prio(c, /*oe_shunt*/false);
1568
1569 /*
1570 * SE installs both an outgoing and incoming policy. Normal
1571 * connections do not.
1572 */
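/*
 * [Editorial note] In the loop below, i == 0 installs the outbound policy
 * (KP_ADD_OUTBOUND, src = this, dst = that) and i == 1 installs the inbound
 * one (KP_ADD_INBOUND) with the endpoints swapped; if the inbound add fails,
 * the already-installed outbound policy is pulled again just below.
 */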
1573 for (unsigned i = 0; i < 2; i++) {
1574 bool inbound = (i == 1);
1575 struct end *src = inbound ? &c->spd.that : &c->spd.this;
1576 struct end *dst = inbound ? &c->spd.this : &c->spd.that;
1577 if (!raw_policy(inbound ? KP_ADD_INBOUND : KP_ADD_OUTBOUND,
1578 /*src*/&src->host_addr, &src->client,
1579 /*dst*/&dst->host_addr, &dst->client,
1580 /*ignored?old/new*/htonl(SPI_PASS), ntohl(SPI_PASS),
1581 /*transport_proto*/c->spd.this.protocol,
1582 /*esatype*/encap.inner_proto->ipproto,
1583 /*encap*/&encap,
1584 /*use_lifetime*/deltatime(0),
1585 /*sa_priority*/priority,
1586 /*sa_marks*/NULL,
1587 /*xfrm_if_id*/0,
1588 /*sec_label*/HUNK_AS_SHUNK(c->config->sec_label),
1589 /*logger*/logger,
1590 "%s() security label policy", __func__)) {
1591 if (inbound) {
1592 /*
1593 * Need to pull the just installed
1594 * outbound policy.
1595 *
1596 * XXX: this call highlights why
1597 * having both KP_*_REVERSED and and
1598 * reversed parameters is just so
1599 * lame. raw_policy can handle this.
1600 */
1601 dbg("pulling previously installed outbound policy");
1602 pexpect(i > 0);
1603 raw_policy(KP_DELETE_OUTBOUND,
1604 /*src*/&c->spd.this.host_addr, &c->spd.this.client,
1605 /*dst*/&c->spd.that.host_addr, &c->spd.that.client,
1606 /*ignored?old/new*/htonl(SPI_PASS), ntohl(SPI_PASS),
1607 /*transport_proto*/c->spd.this.protocol,
1608 /*esatype*/encap.inner_proto->ipproto,
1609 /*encap*/&encap,
1610 /*use_lifetime*/deltatime(0),
1611 /*sa_priority*/priority,
1612 /*sa_marks*/NULL,
1613 /*xfrm_if_id*/0,
1614 /*sec_label*/HUNK_AS_SHUNK(c->config->sec_label),
1615 /*logger*/logger,
1616 "%s() security label policy", __func__);
1617 }
1618 return false;
1619 }
1620 }
1621
1622 /* a new route: no deletion required, but preparation is */
1623 if (!do_command(c, &c->spd, "prepare", NULL/*ST*/, logger)) {
1624 dbg("kernel: %s() prepare command returned an error", __func__);
1625 }
1626
1627 if (!do_command(c, &c->spd, "route", NULL/*ST*/, logger)) {
1628 /* Failure! Unwind our work. */
1629 dbg("kernel: %s() route command returned an error", __func__);
1630 if (!do_command(c, &c->spd, "down", NULL/*st*/, logger)) {
1631 dbg("kernel: down command returned an error");
1632 }
1633 dbg("kernel: %s() pulling policies", __func__);
1634 for (unsigned i = 0; i < 2; i++) {
1635 bool inbound = (i > 0);
1636 struct end *src = inbound ? &c->spd.that : &c->spd.this;
1637 struct end *dst = inbound ? &c->spd.this : &c->spd.that;
1638 /* ignore result */
1639 raw_policy(inbound ? KP_DELETE_INBOUND : KP_DELETE_OUTBOUND,
1640 /*src*/&src->host_addr, &src->client,
1641 /*dst*/&dst->host_addr, &dst->client,
1642 /*ignored?old/new*/htonl(SPI_PASS), ntohl(SPI_PASS),
1643 /*transport_proto*/c->spd.this.protocol,
1644 /*esatype*/encap.inner_proto->ipproto,
1645 /*encap*/&encap,
1646 /*use_lifetime*/deltatime(0),
1647 /*sa_priority*/priority,
1648 /*sa_marks*/NULL,
1649 /*xfrm_if_id*/0,
1650 /*sec_label*/HUNK_AS_SHUNK(c->config->sec_label),
1651 /*logger*/logger,
1652 "%s() security label policy", __func__);
1653 }
1654 return false;
1655 }
1656
1657 /* Success! */
1658 c->spd.routing = RT_ROUTED_PROSPECTIVE;
1659 return true;
1660}
1661
1662bool eroute_connection(enum kernel_policy_op op, const char *opname,
1663 const struct spd_route *sr,
1664 ipsec_spi_t cur_spi,
1665 ipsec_spi_t new_spi,
1666 const struct kernel_route *route,
1667 enum eroute_type esatype,
1668 const struct kernel_encap *encap,
1669 uint32_t sa_priority,
1670 const struct sa_marks *sa_marks,
1671 const uint32_t xfrm_if_id,
1672 shunk_t sec_label,
1673 struct logger *logger)
1674{
1675 if (sr->this.has_cat) {
1676 ip_selector client = selector_from_address(sr->this.host_addr);
1677 bool t = raw_policy(op,
1678 &route->src.host_addr, &client,
1679 &route->dst.host_addr, &route->dst.client,
1680 cur_spi,
1681 new_spi,
1682 sr->this.protocol,
1683 esatype,
1684 encap,
1685 deltatime(0),
1686 sa_priority, sa_marks,
1687 xfrm_if_id,
1688 sec_label,
1689 logger,
1690 "CAT: %s() %s", __func__, opname);
1691 if (!t) {
1692 llog(RC_LOG, logger,
1693 "CAT: failed to eroute additional Client Address Translation policy");
1694 }
1695
1696 dbg("kernel: %s CAT extra route added return=%d", __func__, t);
1697 }
1698
1699 return raw_policy(op,
1700 &route->src.host_addr, &route->src.client,
1701 &route->dst.host_addr, &route->dst.client,
1702 cur_spi,
1703 new_spi,
1704 sr->this.protocol,
1705 esatype,
1706 encap,
1707 deltatime(0),
1708 sa_priority, sa_marks,
1709 xfrm_if_id,
1710 sec_label,
1711 logger,
1712 "%s() %s", __func__, opname);
1713}
1714
1715/* assign a bare hold or pass to a connection */
1716bool assign_holdpass(const struct connection *c,
1717 struct spd_route *sr,
1718 int transport_proto, ipsec_spi_t negotiation_shunt,
1719 const ip_address *src, const ip_address *dst)
1720{
1721 /*
1722 * either the automatically installed %hold eroute is broad enough
1723 * or we try to add a broader one and delete the automatic one.
1724 * Beware: this %hold might be already handled, but still squeak
1725 * through because of a race.
1726 */
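/*
 * [Editorial note] The switch below only promotes the routing state:
 * RT_UNROUTED becomes RT_UNROUTED_HOLD and RT_ROUTED_PROSPECTIVE becomes
 * RT_ROUTED_HOLD; any other value is left untouched because the %hold or
 * %pass has already been accounted for.
 */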
1727 enum routing_t ro = sr->routing; /* routing, old */
1728 enum routing_t rn = ro; /* routing, new */
1729
1730 passert(LHAS(LELEM(CK_PERMANENT) | LELEM(CK_INSTANCE), c->kind));
1731 /* figure out what routing should become */
1732 switch (ro) {
1733 case RT_UNROUTED:
1734 rn = RT_UNROUTED_HOLD;
1735 break;
1736 case RT_ROUTED_PROSPECTIVE:
1737 rn = RT_ROUTED_HOLD;
1738 break;
1739 default:
1740 /* no change: this %hold or %pass is old news */
1741 break;
1742 }
1743
1744 dbg("kernel: assign hold, routing was %s, needs to be %s",
1745 enum_name(&routing_story, ro),
1746 enum_name(&routing_story, rn));
1747
1748 if (eclipsable(sr)) {
1749 /*
1750 * Although %hold or %pass is appropriately broad, it will
1751 * no longer be bare so we must ditch it from the bare table
1752 */
1753 struct bare_shunt **old = bare_shunt_ptr(&sr->this.client, &sr->that.client,
1754 sr->this.protocol, "assign_holdpass");
1755
1756 if (old == NULL) {
1757 /* ??? should this happen? It does. */
1758 llog(RC_LOG, c->logger,
1759 "assign_holdpass() no bare shunt to remove? - mismatch?");
1760 } else {
1761 /* ??? should this happen? */
1762 dbg("kernel: assign_holdpass() removing bare shunt");
1763 free_bare_shunt(old);
1764 }
1765 } else {
1766 dbg("kernel: assign_holdpass() need broad(er) shunt");
1767 /*
1768 * we need a broad %hold, not the narrow one.
1769 * First we ensure that there is a broad %hold.
1770 * There may already be one (race condition): no need to
1771 * create one.
1772 * There may already be a %trap: replace it.
1773 * There may not be any broad eroute: add %hold.
1774 * Once the broad %hold is in place, delete the narrow one.
1775 */
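/*
 * [Editorial note] The op below is chosen from the old routing state: if an
 * eroute already exists (erouted(ro)) the broad policy replaces the %trap via
 * KP_REPLACE_OUTBOUND, otherwise it is added with KP_ADD_OUTBOUND; only after
 * that does the narrow bare shunt get deleted.
 */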
1776 if (rn != ro) {
1777 int op;
1778 const char *reason;
1779
1780 if (erouted(ro)) {
1781 op = KP_REPLACE_OUTBOUND;
1782 reason = "assign_holdpass() replace %trap with broad %pass or %hold";
1783 } else {
1784 op = KP_ADD_OUTBOUND;
1785 reason = "assign_holdpass() add broad %pass or %hold";
1786 }
1787
1788 pexpect((op & KERNEL_POLICY_DIR_MASK) == KERNEL_POLICY_DIR_OUT);
1789 struct kernel_route route = kernel_route_from_spd(sr,
1790 ENCAP_MODE_TRANSPORT,
1791 ENCAP_DIRECTION_OUTBOUND);
1792 /*
1793 * XXX: why?
1794 *
1795 * Because only this end is interesting?
1796 * Because it is a shunt and the other end
1797 * doesn't matter?
1798 */
1799 route.dst.host_addr = address_type(&route.dst.host_addr)->address.any;
1800
1801 if (eroute_connection(op, reason,
1802 sr,
1803 htonl(SPI_HOLD), /* kernel induced */
1804 htonl(negotiation_shunt),
1805 &route, ET_INT,
1806 esp_transport_proto_info,
1807 calculate_sa_prio(c, false),
1808 NULL, 0 /* xfrm_if_id */,
1809 HUNK_AS_SHUNK(c->config->sec_label),
1810 c->logger))
1811 {
1812 dbg("kernel: assign_holdpass() eroute_connection() done");
1813 } else {
1814 llog(RC_LOG, c->logger,
1815 "assign_holdpass() eroute_connection() failed");
1816 return false;
1817 }
1818 }
1819
1820 if (!delete_bare_shunt(src, dst,
1821 transport_proto,
1822 (c->policy & POLICY_NEGO_PASS) ? SPI_PASS : SPI_HOLD,
1823 /*skip_xfrm_policy_delete?*/false,
1824 ((c->policy & POLICY_NEGO_PASS) ? "delete narrow %pass" :
1825 "assign_holdpass() delete narrow %hold"),
1826 c->logger)) {
1827 dbg("kernel: assign_holdpass() delete_bare_shunt() succeeded");
1828 } else {
1829 llog(RC_LOG, c->logger,
1830 "assign_holdpass() delete_bare_shunt() failed");
1831 return false;
1832 }
1833 }
1834 sr->routing = rn;
1835 dbg("kernel: assign_holdpass() done - returning success");
1836 return true;
1837}
1838
1839/* compute a (host-order!) SPI to implement the policy in connection c */
1840enum policy_spi shunt_policy_spi(const struct connection *c, bool prospective)
1841{
1842 if (prospective) {
1843 /* note: these are in host order :-( */
1844 static const ipsec_spi_t shunt_spi[SHUNT_POLICY_ROOF] = {
1845 [SHUNT_DEFAULT] = SPI_TRAP, /* --initiateontraffic */
1846 [SHUNT_PASS] = SPI_PASS, /* --pass */
1847 [SHUNT_DROP] = SPI_DROP, /* --drop */
1848 [SHUNT_REJECT] = SPI_REJECT, /* --reject */
1849 };
1850 enum shunt_policy sp = (c->policy & POLICY_SHUNT_MASK) >> POLICY_SHUNT_SHIFT;
1851 passert(sp < elemsof(shunt_spi));
1852 return shunt_spi[sp];
1853 } else {
1854 /* note: these are in host order :-( */
1855 static const ipsec_spi_t fail_spi[SHUNT_POLICY_ROOF] = {
1856 [SHUNT_DEFAULT] = 0, /* --none*/
1857 [SHUNT_PASS] = SPI_PASS, /* --failpass */
1858 [SHUNT_DROP] = SPI_DROP, /* --faildrop */
1859 [SHUNT_REJECT] = SPI_REJECT, /* --failreject */
1860 };
1861 enum shunt_policy sp = (c->policy & POLICY_FAIL_MASK) >> POLICY_FAIL_SHIFT;
1862 passert(sp < elemsof(fail_spi));
1863 return fail_spi[sp];
1864 }
1865}
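/*
 * [Editorial illustration, not part of kernel.c] Reading the two tables
 * together: a connection configured with --pass yields SPI_PASS from
 * shunt_policy_spi(c, true), while one configured with --failreject yields
 * SPI_REJECT from shunt_policy_spi(c, false); the defaults are SPI_TRAP
 * (prospective) and 0, i.e. --none, on failure.
 */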
1866
1867bool del_spi(ipsec_spi_t spi, const struct ip_protocol *proto,
1868 const ip_address *src, const ip_address *dst,
1869 struct logger *logger)
1870{
1871 said_buf sb;
1872 const char *text_said = said_str(*dst, proto, spi, &sb);
1873
1874 address_buf b;
1875 dbg("kernel: deleting spi %s -> %s",
1876 str_address(src, &b), text_said);
1877
1878 struct kernel_sa sa = {
1879 .spi = spi,
1880 .proto = proto,
1881 .src.address = src,
1882 .dst.address = dst,
1883 .story = text_said,
1884 };
1885
1886 passert(kernel_ops->del_sa != NULL);
1887 return kernel_ops->del_sa(&sa, logger);
1888}
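/*
 * [Editorial illustration, not part of kernel.c; names are assumptions]
 * A caller wanting to remove an ESP SA for a given SPI would do roughly:
 *
 *     if (!del_spi(spi, &ip_protocol_esp, &src_addr, &dst_addr, logger))
 *             llog(RC_LOG, logger, "delete of ESP SA failed");
 *
 * where ip_protocol_esp is assumed to be the ESP entry of the ip_protocols
 * table used elsewhere in this file.
 */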
1889
1890static void setup_esp_nic_offload(struct kernel_sa *sa, struct connection *c,
1891 bool *nic_offload_fallback)
1892{
1893 if (c->nic_offload == yna_no ||
1894 c->interface == NULL || c->interface->ip_dev == NULL ||
1895 c->interface->ip_dev->id_rname == NULL) {
1896 dbg("kernel: NIC esp-hw-offload disabled for connection '%s'", c->name);
1897 return;
1898 }
1899
1900 if (c->nic_offload == yna_auto) {
1901 if (!c->interface->ip_dev->id_nic_offload) {
1902 dbg("kernel: NIC esp-hw-offload not for connection '%s' not available on interface %s",
1903 c->name, c->interface->ip_dev->id_rname);
1904 return;
1905 }
1906 *nic_offload_fallback = true;
1907 dbg("kernel: NIC esp-hw-offload offload for connection '%s' enabled on interface %s",
1908 c->name, c->interface->ip_dev->id_rname);
1909 }
1910 sa->nic_offload_dev = c->interface->ip_dev->id_rname;
1911}
1912
1913/*
1914 * Set up one direction of the SA bundle
1915 */
1916static bool setup_half_ipsec_sa(struct state *st, bool inbound)
1917{
1918 /* Build an inbound or outbound SA */
1919
1920 struct connection *c = st->st_connection;
1921 bool replace = inbound && (kernel_ops->get_spi != NULL);
1922 bool outgoing_ref_set = false;
1923 bool incoming_ref_set = false;
1924 IPsecSAref_t ref_peer = st->st_ref_peer;
1925 IPsecSAref_t new_ref_peer = IPSEC_SAREF_NULL;
1926 bool nic_offload_fallback = false;
1927
1928 /* SPIs, saved for spigrouping or undoing, if necessary */
1929 struct kernel_sa said[EM_MAXRELSPIS];
1930 struct kernel_sa *said_next = said;
1931
1932 /* same scope as said[] */
1933 said_buf text_ipcomp;
1934 said_buf text_esp;
1935 said_buf text_ah;
1936
1937 /*
1938 * Construct the policy encapsulation rules; it determines
1939 * tunnel mode as a side effect.
1940 */
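/*
 * [Editorial note] The encap.mode computed here (ENCAP_MODE_TUNNEL vs
 * ENCAP_MODE_TRANSPORT) is what later sets said_boilerplate.tunnel and is
 * also fed to kernel_route_from_spd(), so this single call determines both
 * the SA's encapsulation and the route endpoints for this direction.
 */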
1941 struct kernel_encap encap = kernel_encap_from_state(st, &c->spd);
1942 if (!pexpect(encap.outer >= 0)) {
1943 return false;
1944 }
1945
1946 struct kernel_route route = kernel_route_from_spd(&c->spd, encap.mode,
1947 inbound ? ENCAP_DIRECTION_INBOUND : ENCAP_DIRECTION_OUTBOUND);
1948
1949 const struct kernel_sa said_boilerplate = {
1950 .src.address = &route.src.host_addr,
1951 .dst.address = &route.dst.host_addr,
1952 .src.client = &route.src.client,
1953 .dst.client = &route.dst.client,
1954 .inbound = inbound,
1955 .tunnel = (encap.mode == ENCAP_MODE_TUNNEL),
1956 .transport_proto = c->spd.this.protocol,
1957 .sa_lifetime = c->sa_ipsec_life_seconds,
1958 .outif = -1,
1959 .sec_label = (st->st_v1_seen_sec_label.len > 0 ? st->st_v1_seen_sec_label :
1960 st->st_v1_acquired_sec_label.len > 0 ? st->st_v1_acquired_sec_label :
1961 c->spd.this.sec_label /* assume connection outlive their kernel_sa's */),
1962 };
1963
1964 address_buf sab, dab;
1965 selector_buf scb, dcb;
1966 dbg("kernel: %s() %s %s-%s->[%s=%s=>%s]-%s->%s sec_label="PRI_SHUNK"%s",
1967 __func__,
1968 said_boilerplate.inbound ? "inbound" : "outbound",
1969 str_selector(said_boilerplate.src.client, &scb),
1970 protocol_by_ipproto(said_boilerplate.transport_proto)->name,
1971 str_address(said_boilerplate.src.address, &sab),
1972 encap.inner_proto->name,
1973 str_address(said_boilerplate.dst.address, &dab),
1974 protocol_by_ipproto(said_boilerplate.transport_proto)->name,
1975 str_selector(said_boilerplate.dst.client, &dcb),
1976 /* see above */
1977 pri_shunk(said_boilerplate.sec_label),
1978 (st->st_v1_seen_sec_label.len > 0 ? " (IKEv1 seen)" :
1979 st->st_v1_acquired_sec_label.len > 0 ? " (IKEv1 acquired)" :
1980 c->spd.this.sec_label.len > 0 ? " (IKEv2 this)" :
1981 ""));
1982
1983 /* set up IPCOMP SA, if any */
1984
1985 if (st->st_ipcomp.present) {
1986 ipsec_spi_t ipcomp_spi =
1987 inbound ? st->st_ipcomp.our_spi : st->st_ipcomp.attrs.spi;
1988 *said_next = said_boilerplate;
1989 said_next->spi = ipcomp_spi;
1990 said_next->esatype = ET_IPCOMP;
1991
1992 said_next->ipcomp_algo = st->st_ipcomp.attrs.transattrs.ta_comp;
1993 said_next->level = said_next - said;
1994 said_next->reqid = reqid_ipcomp(c->spd.reqid);
1995 said_next->story = said_str(route.dst.host_addr, &ip_protocol_comp,
1996 ipcomp_spi, &text_ipcomp);
1997
1998 if (inbound) {
1999 /*
2000 * set corresponding outbound SA. We can do this on
2001 * each SA in the bundle without harm.
2002 */
2003 said_next->ref_peer = ref_peer;
2004 } else if (!outgoing_ref_set) {
2005 /* on outbound, pick up the SAref if not already done */
2006 said_next->ref = ref_peer;
2007 outgoing_ref_set = true;
2008 }
2009
2010 if (!kernel_ops_add_sa(said_next, replace, st->st_logger)) {
2011 log_state(RC_LOG, st, "add_sa ipcomp failed");
2012 goto fail;
2013 }
2014
2015 /*
2016 * SA refs will have been allocated for this SA.
2017 * The inner most one is interesting for the outgoing SA,
2018 * since we refer to it in the policy that we instantiate.
2019 */
2020 if (new_ref_peer == IPSEC_SAREF_NULL && !inbound) {
2021 new_ref_peer = said_next->ref;
2022 if (kernel_ops->type != USE_XFRM && new_ref_peer == IPSEC_SAREF_NULL)
2023 new_ref_peer = IPSEC_SAREF_NA;
2024 }
2025 if (!incoming_ref_set && inbound) {
2026 st->st_ref = said_next->ref;
2027 incoming_ref_set = true;
2028 }
2029 said_next++;
2030 }
2031
2032 /* set up ESP SA, if any */
2033
2034 if (st->st_esp.present) {
2035 ipsec_spi_t esp_spi =
2036 inbound ? st->st_esp.our_spi : st->st_esp.attrs.spi;
2037 uint8_t *esp_dst_keymat =
2038 inbound ? st->st_esp.our_keymat : st->st_esp.
2039 peer_keymat;
2040 const struct trans_attrs *ta = &st->st_esp.attrs.transattrs;
2041
2042 const struct ip_encap *encap_type = NULL;
2043 uint16_t encap_sport = 0, encap_dport = 0;
2044 ip_address natt_oa;
2045
2046 if (st->hidden_variables.st_nat_traversal & NAT_T_DETECTED ||
2047 st->st_interface->protocol == &ip_protocol_tcp) {
2048 encap_type = st->st_interface->protocol->encap_esp;
2049 if (inbound) {
2050 encap_sport = endpoint_hport(st->st_remote_endpoint);
2051 encap_dport = endpoint_hport(st->st_interface->local_endpoint);
2052 } else {
2053 encap_sport = endpoint_hport(st->st_interface->local_endpoint);
2054 encap_dport = endpoint_hport(st->st_remote_endpoint);
2055 }
2056 natt_oa = st->hidden_variables.st_nat_oa;
2057 dbg("kernel: natt/tcp sa encap_type="PRI_IP_ENCAP" sport=%d dport=%d",
2058 pri_ip_encap(encap_type), encap_sport, encap_dport);
2059 }
2060
2061 dbg("kernel: looking for alg with encrypt: %s keylen: %d integ: %s",
2062 ta->ta_encrypt->common.fqn, ta->enckeylen, ta->ta_integ->common.fqn);
2063
2064 /*
2065 * Check that both integrity and encryption are
2066 * supported by the kernel.
2067 *
2068 * Since the parser uses these exact same checks when
2069 * loading the connection, they should never fail (if
2070 * they do then strange things have been going on
2071 * since the connection was loaded).
2072 */
2073 if (!kernel_alg_integ_ok(ta->ta_integ)) {
2074 log_state(RC_LOG_SERIOUS, st,
2075 "ESP integrity algorithm %s is not implemented or allowed",
2076 ta->ta_integ->common.fqn);
2077 goto fail;
2078 }
2079 if (!kernel_alg_encrypt_ok(ta->ta_encrypt)) {
2080 log_state(RC_LOG_SERIOUS, st,
2081 "ESP encryption algorithm %s is not implemented or allowed",
2082 ta->ta_encrypt->common.fqn);
2083 goto fail;
2084 }
2085
2086 /*
2087 * Validate the encryption key size.
2088 */
2089 size_t encrypt_keymat_size;
2090 if (!kernel_alg_encrypt_key_size(ta->ta_encrypt, ta->enckeylen,
2091 &encrypt_keymat_size)) {
2092 log_state(RC_LOG_SERIOUS, st,
2093 "ESP encryption algorithm %s with key length %d not implemented or allowed",
2094 ta->ta_encrypt->common.fqn, ta->enckeylen);
2095 goto fail;
2096 }
2097
2098 /* Fixup key lengths for special cases */
2099#ifdef USE_3DES
2100 if (ta->ta_encrypt == &ike_alg_encrypt_3des_cbc) {
2101 /* Grrrrr.... f*cking 7 bits jurassic algos */
2102 /* 168 bits in kernel, need 192 bits for keymat_len */
2103 if (encrypt_keymat_size == 21) {
2104 dbg("kernel: %s requires a 7-bit jurassic adjust",
2105 ta->ta_encrypt->common.fqn);
2106 encrypt_keymat_size = 24;
2107 }
2108 }
2109#endif
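/*
 * [Editorial note] The arithmetic behind the 3DES fixup above: the kernel
 * reports 3DES as a 168-bit (21-byte) key, i.e. 3 x 56 key bits, but the
 * keymat also carries the parity bits, 3 x 64 = 192 bits = 24 bytes, hence
 * the bump from 21 to 24.
 */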
2110
2111 if (ta->ta_encrypt->salt_size > 0) {
2112 dbg("kernel: %s requires %zu salt bytes",
2113 ta->ta_encrypt->common.fqn, ta->ta_encrypt->salt_size);
2114 encrypt_keymat_size += ta->ta_encrypt->salt_size;
2115 }
2116
2117 size_t integ_keymat_size = ta->ta_integ->integ_keymat_size; /* BYTES */
2118
2119 dbg("kernel: st->st_esp.keymat_len=%" PRIu16 " is encrypt_keymat_size=%zu + integ_keymat_size=%zu",
2120 st->st_esp.keymat_len, encrypt_keymat_size, integ_keymat_size);
2121
2122 passert(st->st_esp.keymat_len == encrypt_keymat_size + integ_keymat_size)({ _Bool assertion__ = st->st_esp.keymat_len == encrypt_keymat_size
+ integ_keymat_size; if (!assertion__) { where_t here = ({ static
const struct where here = { .func = __func__, .file = "programs/pluto/kernel.c"
, .line = 2122, }; &here; }); const struct logger *logger_
= &failsafe_logger; llog_passert(logger_, here, "%s", "st->st_esp.keymat_len == encrypt_keymat_size + integ_keymat_size"
); } (void) 1; })
;
2123
2124 *said_next = said_boilerplate;
2125 said_next->spi = esp_spi;
2126 said_next->esatype = ET_ESP;
2127 said_next->replay_window = c->sa_replay_window;
2128 dbg("kernel: setting IPsec SA replay-window to %d", c->sa_replay_window){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: setting IPsec SA replay-window to %d"
, c->sa_replay_window); } }
;
2129
2130 if (c->xfrmi != NULL) {
2131 said_next->xfrm_if_id = c->xfrmi->if_id;
2132 said_next->mark_set = c->sa_marks.out;
2133 }
2134
2135 if (!inbound && c->sa_tfcpad != 0 && !st->st_seen_no_tfc) {
2136 dbg("kernel: Enabling TFC at %d bytes (up to PMTU)", c->sa_tfcpad);
2137 said_next->tfcpad = c->sa_tfcpad;
2138 }
2139
2140 if (c->policy & POLICY_DECAP_DSCP) {
2141 dbg("kernel: Enabling Decap ToS/DSCP bits");
2142 said_next->decap_dscp = true;
2143 }
2144 if (c->policy & POLICY_NOPMTUDISC) {
2145 dbg("kernel: Disabling Path MTU Discovery");
2146 said_next->nopmtudisc = true;
2147 }
2148
2149 said_next->integ = ta->ta_integ;
2150 #ifdef USE_SHA2
2151 if (said_next->integ == &ike_alg_integ_sha2_256 &&
2152 LIN(POLICY_SHA2_TRUNCBUG, c->policy)) {
2153 if (kernel_ops->sha2_truncbug_support) {
2154 if (libreswan_fipsmode() == 1) {
2155 log_state(RC_LOG_SERIOUS, st,
2156 "Error: sha2-truncbug=yes is not allowed in FIPS mode");
2157 goto fail;
2158 }
2159 dbg("kernel: authalg converted for sha2 truncation at 96bits instead of IETF's mandated 128bits");
2160 /*
2161 * We need to tell the kernel to mangle
2162 * the sha2_256, as instructed by the user
2163 */
2164 said_next->integ = &ike_alg_integ_hmac_sha2_256_truncbug;
2165 } else {
2166 log_state(RC_LOG_SERIOUS, st,
2167 "Error: %s stack does not support sha2_truncbug=yes",
2168 kernel_ops->kern_name);
2169 goto fail;
2170 }
2171 }
2172 #endif
2173 if (st->st_esp.attrs.transattrs.esn_enabled) {
2174 dbg("kernel: Enabling ESN");
2175 said_next->esn = true;
2176 }
2177
2178 /*
2179 * XXX: Assume SADB_ and ESP_ numbers match! Clearly
2180 * setting .compalg is wrong, don't yet trust
2181 * lower-level code to be right.
2182 */
2183 said_next->encrypt = ta->ta_encrypt;
2184
2185 /* divide up keying material */
2186 said_next->enckey = esp_dst_keymat;
2187 said_next->enckeylen = encrypt_keymat_size; /* BYTES */
2188 said_next->authkey = esp_dst_keymat + encrypt_keymat_size;
2189 said_next->authkeylen = integ_keymat_size; /* BYTES */
2190
2191 said_next->level = said_next - said;
2192 said_next->reqid = reqid_esp(c->spd.reqid);
2193
2194 said_next->src.encap_port = encap_sport;
2195 said_next->dst.encap_port = encap_dport;
2196 said_next->encap_type = encap_type;
2197 said_next->natt_oa = &natt_oa;
2198 said_next->story = said_str(route.dst.host_addr, &ip_protocol_esp,
2199 esp_spi, &text_esp);
2200
2201 if (DBGP(DBG_PRIVATE) || DBGP(DBG_CRYPT)) {
2202 DBG_dump("ESP enckey:", said_next->enckey,
2203 said_next->enckeylen);
2204 DBG_dump("ESP authkey:", said_next->authkey,
2205 said_next->authkeylen);
2206 }
2207
2208 if (inbound) {
2209 /*
2210 * set corresponding outbound SA. We can do this on
2211 * each SA in the bundle without harm.
2212 */
2213 said_next->ref_peer = ref_peer;
2214 } else if (!outgoing_ref_set) {
2215 /* on outbound, pick up the SAref if not already done */
2216 said_next->ref = ref_peer;
2217 outgoing_ref_set = true;
2218 }
2219 setup_esp_nic_offload(said_next, c, &nic_offload_fallback);
2220
2221 bool ret = kernel_ops_add_sa(said_next, replace, st->st_logger);
2222
2223 if (!ret && nic_offload_fallback &&
2224 said_next->nic_offload_dev != NULL) {
2225 /* Fallback to non-nic-offload crypto */
2226 said_next->nic_offload_dev = NULL;
2227 ret = kernel_ops_add_sa(said_next, replace, st->st_logger);
2228 }
2229
2230 /* scrub keys from memory */
2231 memset(said_next->enckey, 0, said_next->enckeylen);
2232 memset(said_next->authkey, 0, said_next->authkeylen);
2233
2234 if (!ret)
2235 goto fail;
2236
2237 /*
2238 * SA refs will have been allocated for this SA.
2239 * The inner most one is interesting for the outgoing SA,
2240 * since we refer to it in the policy that we instantiate.
2241 */
2242 if (new_ref_peer == IPSEC_SAREF_NULL && !inbound) {
2243 new_ref_peer = said_next->ref;
2244 if (kernel_ops->type != USE_XFRM && new_ref_peer == IPSEC_SAREF_NULL)
2245 new_ref_peer = IPSEC_SAREF_NA;
2246 }
2247 if (!incoming_ref_set && inbound) {
2248 st->st_ref = said_next->ref;
2249 incoming_ref_set = true;
2250 }
2251 said_next++;
2252 }
2253
2254 /* set up AH SA, if any */
2255
2256 if (st->st_ah.present) {
2257 ipsec_spi_t ah_spi =
2258 inbound ? st->st_ah.our_spi : st->st_ah.attrs.spi;
2259 uint8_t *ah_dst_keymat =
2260 inbound ? st->st_ah.our_keymat : st->st_ah.peer_keymat;
2261
2262 const struct integ_desc *integ = st->st_ah.attrs.transattrs.ta_integ;
2263 size_t keymat_size = integ->integ_keymat_size;
2264 int authalg = integ->integ_ikev1_ah_transform;
2265 if (authalg <= 0) {
2266 log_state(RC_LOG_SERIOUS, st,
2267 "%s not implemented",
2268 integ->common.fqn);
2269 goto fail;
2270 }
2271
2272 passert(st->st_ah.keymat_len == keymat_size);
2273
2274 *said_next = said_boilerplate;
2275 said_next->spi = ah_spi;
2276 said_next->esatype = ET_AH;
2277 said_next->integ = integ;
2278 said_next->authkeylen = st->st_ah.keymat_len;
2279 said_next->authkey = ah_dst_keymat;
2280 said_next->level = said_next - said;
2281 said_next->reqid = reqid_ah(c->spd.reqid);
2282 said_next->story = said_str(route.dst.host_addr, &ip_protocol_ah,
2283 ah_spi, &text_ah);
2284
2285 said_next->replay_window = c->sa_replay_window;
2286 dbg("kernel: setting IPsec SA replay-window to %d", c->sa_replay_window);
2287
2288 if (st->st_ah.attrs.transattrs.esn_enabled) {
2289 dbg("kernel: Enabling ESN");
2290 said_next->esn = true;
2291 }
2292
2293 if (DBGP(DBG_PRIVATE) || DBGP(DBG_CRYPT)) {
2294 DBG_dump("AH authkey:", said_next->authkey,
2295 said_next->authkeylen);
2296 }
2297
2298 if (inbound) {
2299 /*
2300 * set corresponding outbound SA. We can do this on
2301 * each SA in the bundle without harm.
2302 */
2303 said_next->ref_peer = ref_peer;
2304 } else if (!outgoing_ref_set) {
2305 /* on outbound, pick up the SAref if not already done */
2306 said_next->ref = ref_peer;
2307 outgoing_ref_set = true; /* outgoing_ref_set not subsequently used */
Value stored to 'outgoing_ref_set' is never read
2308 }
2309
2310 if (!kernel_ops_add_sa(said_next, replace, st->st_logger)) {
2311 /* scrub key from memory */
2312 memset(said_next->authkey, 0, said_next->authkeylen);
2313 goto fail;
2314 }
2315 /* scrub key from memory */
2316 memset(said_next->authkey, 0, said_next->authkeylen);
2317
2318 /*
2319 * SA refs will have been allocated for this SA.
2320 * The inner most one is interesting for the outgoing SA,
2321 * since we refer to it in the policy that we instantiate.
2322 */
2323 if (new_ref_peer == IPSEC_SAREF_NULL && !inbound) {
2324 new_ref_peer = said_next->ref;
2325 if (kernel_ops->type != USE_XFRM && new_ref_peer == IPSEC_SAREF_NULL)
2326 new_ref_peer = IPSEC_SAREF_NA;
2327 }
2328 if (!incoming_ref_set && inbound) {
2329 st->st_ref = said_next->ref;
2330 incoming_ref_set = true; /* incoming_ref_set not subsequently used */
2331 }
2332 said_next++;
2333 }
2334
2335 /*
2336 * Add an inbound eroute to enforce an arrival check.
2337 *
2338 * If inbound,
2339 * ??? and some more mysterious conditions,
2340 * Note reversed ends.
2341 * Not much to be done on failure.
2342 */
2343 dbg("kernel: %s() is thinking about installing inbound eroute? inbound=%d owner=#%lu %s",
2344 __func__, inbound, c->spd.eroute_owner,
2345 encap_mode_name(encap.mode));
2346 if (inbound &&
2347 c->spd.eroute_owner == SOS_NOBODY &&
2348 (c->config->sec_label.len == 0 || c->ike_version == IKEv1)) {
2349 dbg("kernel: %s() is installing inbound eroute", __func__);
2350 uint32_t xfrm_if_id = c->xfrmi != NULL ?
2351 c->xfrmi->if_id : 0;
2352
2353 /*
2354 * MCR - should be passed a spd_eroute structure here.
2355 *
2356 * Note: this and that are intentionally reversed
2357 * because the policy is inbound.
2358 *
2359 * XXX: yes, that is redundant - KP_ADD_INBOUND is
2360 * already indicating that the parameters are going to
2361 * need reversing ...
2362 */
2363 if (!raw_policy(KP_ADD_INBOUND,
2364 &route.src.host_addr, /* src_host */
2365 &route.src.client, /* src_client */
2366 &route.dst.host_addr, /* dst_host */
2367 &route.dst.client, /* dst_client */
2368 /*old*/htonl(SPI_IGNORE), /*new*/htonl(SPI_IGNORE),
2369 c->spd.this.protocol, /* transport_proto */
2370 encap.inner_proto->ipproto, /* esatype */
2371 &encap, /* " */
2372 deltatime(0), /* lifetime */
2373 calculate_sa_prio(c, false), /* priority */
2374 &c->sa_marks, /* IPsec SA marks */
2375 xfrm_if_id,
2376 HUNK_AS_SHUNK(c->config->sec_label),
2377 st->st_logger,
2378 "%s() add inbound Child SA", __func__)) {
2379 llog(RC_LOG, st->st_logger,
2380 "raw_policy() in setup_half_ipsec_sa() failed to add inbound");
2381 }
2382 }
2383
2384 /* If there are multiple SPIs, group them. */
2385
2386 if (kernel_ops->grp_sa != NULL && said_next > &said[1]) {
2387 struct kernel_sa *s;
2388
2389 /*
2390 * group SAs, two at a time, inner to outer (backwards in
2391 * said[])
2392 *
2393 * The grouping is by pairs. So if said[] contains
2394 * ah esp ipip,
2395 *
2396 * the grouping would be ipip:esp, esp:ah.
2397 */
2398 for (s = said; s < said_next - 1; s++) {
2399 dbg("kernel: grouping %s (ref=%u) and %s (ref=%u)",
2400 s[0].story, s[0].ref,
2401 s[1].story, s[1].ref);
2402 if (!kernel_ops->grp_sa(s + 1, s)) {
2403 log_state(RC_LOG, st, "grp_sa failed");
2404 goto fail;
2405 }
2406 }
2407 /* could update said, but it will not be used */
2408 }
2409
2410 if (new_ref_peer != IPSEC_SAREF_NULL)
2411 st->st_ref_peer = new_ref_peer;
2412
2413 /* if the impaired is set, pretend this fails */
2414 if (impair.sa_creation) {
2415 DBG_log("Impair SA creation is set, pretending to fail");
2416 goto fail;
2417 }
2418 return true;
2419
2420fail:
2421 log_state(RC_LOG, st, "setup_half_ipsec_sa() hit fail:");
2422 /* undo the done SPIs */
2423 while (said_next-- != said) {
2424 if (said_next->proto != 0) {
2425 del_spi(said_next->spi, said_next->proto,
2426 said_next->src.address, said_next->dst.address,
2427 st->st_logger);
2428 }
2429 }
2430 return false;
2431}
2432
2433/*
2434 * XXX: Two cases:
2435 *
2436 * - the protocol was negotiated (and presumably installed)
2437 * (.present)
2438 *
2439 * - the protocol was proposed but never finished (.out_spi
2440 * inbound)
2441 */
2442
2443struct dead_spi { /* XXX: this is ip_said+src */
2444 const struct ip_protocol *protocol;
2445 ipsec_spi_t spi;
2446 ip_address src;
2447 ip_address dst;
2448};
2449
2450 static unsigned append_teardown(struct dead_spi *dead, bool inbound,
2451 const struct ipsec_proto_info *proto,
2452 ip_address host_addr, ip_address effective_remote_address)
2453 {
2454 bool present = proto->present;
2455 if (!present && inbound && proto->our_spi != 0 && proto->attrs.spi == 0) {
2456 dbg("kernel: forcing inbound delete of %s as .our_spi: "PRI_IPSEC_SPI"; attrs.spi: "PRI_IPSEC_SPI,
2457 proto->protocol->name,
2458 pri_ipsec_spi(proto->our_spi),
2459 pri_ipsec_spi(proto->attrs.spi));
2460 present = true;
2461 }
2462 if (present) {
2463 dead->protocol = proto->protocol;
2464 if (inbound) {
2465 dead->spi = proto->our_spi; /* incoming */
2466 dead->src = effective_remote_address;
2467 dead->dst = host_addr;
2468 } else {
2469 dead->spi = proto->attrs.spi; /* outgoing */
2470 dead->src = host_addr;
2471 dead->dst = effective_remote_address;
2472 }
2473 return 1;
2474 }
2475 return 0;
2476}
2477
2478 static bool teardown_half_ipsec_sa(struct state *st, bool inbound)
2479{
2480 /* Delete any AH, ESP, and IP in IP SPIs. */
2481
2482 struct connection *const c = st->st_connection;
2483
2484 /*
2485 * If we have a new address in c->spd.that.host_addr,
2486 * we are the initiator, have been redirected,
2487 * and yet this routine must use the old address.
2488 *
2489 * We point effective_remote_host_address to the appropriate
2490 * address.
2491 */
2492
2493 ip_address effective_remote_address = c->spd.that.host_addr;
2494 if (!endpoint_address_eq_address(st->st_remote_endpoint, effective_remote_address) &&
2495 address_is_specified(c->temp_vars.redirect_ip)) {
2496 effective_remote_address = endpoint_address(st->st_remote_endpoint);
2497 }
2498
2499 /* ??? CLANG 3.5 thinks that c might be NULL */
2500 if (inbound && c->spd.eroute_owner == SOS_NOBODY &&
2501 !raw_policy(KP_DELETE_INBOUND,
2502 &effective_remote_address,
2503 &c->spd.that.client,
2504 &c->spd.this.host_addr,
2505 &c->spd.this.client,
2506 htonl(SPI_IGNORE), htonl(SPI_IGNORE),
2507 c->spd.this.protocol,
2508 c->ipsec_mode == ENCAPSULATION_MODE_TRANSPORT ?
2509 ET_ESP : ET_UNSPEC,
2510 esp_transport_proto_info,
2511 deltatime(0),
2512 calculate_sa_prio(c, false),
2513 &c->sa_marks,
2514 0, /* xfrm_if_id. needed to tear down? */
2515 /*sec_label:always-null*/null_shunk,
2516 st->st_logger,
2517 "%s() teardown inbound Child SA", __func__)) {
2518 llog(RC_LOG, st->st_logger,
2519 "raw_policy in teardown_half_ipsec_sa() failed to delete inbound");
2520 }
2521
2522 /* collect each proto SA that needs deleting */
2523
2524 struct dead_spi dead[3]; /* at most 3 entries */
2525 unsigned nr = 0;
2526 nr += append_teardown(dead + nr, inbound, &st->st_ah,
2527 c->spd.this.host_addr, effective_remote_address);
2528 nr += append_teardown(dead + nr, inbound, &st->st_esp,
2529 c->spd.this.host_addr, effective_remote_address);
2530 nr += append_teardown(dead + nr, inbound, &st->st_ipcomp,
2531 c->spd.this.host_addr, effective_remote_address);
2532 passert(nr < elemsof(dead));
2533
2534 /*
2535 * If the SAs have been grouped, deleting any one will do: we
2536 * just delete the first one found.
2537 */
2538 if (kernel_ops->grp_sa != NULL && nr > 1) {
2539 nr = 1;
2540 }
2541
2542 /* delete each proto that needs deleting */
2543 bool result = true;
2544
2545 for (unsigned i = 0; i < nr; i++) {
2546 const struct dead_spi *tbd = &dead[i];
2547 result &= del_spi(tbd->spi, tbd->protocol, &tbd->src, &tbd->dst, st->st_logger);
2548 }
2549
2550 return result;
2551}
2552
2553static event_callback_routine kernel_process_msg_cb;
2554
2555 static void kernel_process_msg_cb(evutil_socket_t fd,
2556 const short event UNUSED,
2557 void *arg)
2558 {
2559 struct logger logger[1] = { GLOBAL_LOGGER(null_fd), }; /* event-handler */
2560 const struct kernel_ops *kernel_ops = arg;
2561
2562 dbg("kernel: %s process netlink message", __func__);
2563 threadtime_t start = threadtime_start();
2564 kernel_ops->process_msg(fd, logger);
2565 threadtime_stop(&start, SOS_NOBODY, "kernel message");
2566}
2567
2568static global_timer_cb kernel_process_queue_cb;
2569
2570 static void kernel_process_queue_cb(struct logger *unused_logger UNUSED)
2571 {
2572 if (pexpect(kernel_ops->process_queue != NULL)) {
2573 kernel_ops->process_queue();
2574 }
2575}
2576
2577const struct kernel_ops *kernel_ops =
2578 #ifdef XFRM_SUPPORT
2579 &xfrm_kernel_ops
2580 #endif
2581 #ifdef BSD_KAME
2582 &bsdkame_kernel_ops
2583 #endif
2584 ;
2585
2586 deltatime_t bare_shunt_interval = DELTATIME_INIT(SHUNT_SCAN_INTERVAL);
2587
2588void init_kernel(struct logger *logger)
2589{
2590 struct utsname un;
2591
2592 /* get kernel version */
2593 uname(&un);
2594 llog(RC_LOG, logger,
2595 "using %s %s kernel support code on %s",
2596 un.sysname, kernel_ops->kern_name, un.version);
2597
2598 passert(kernel_ops->init != NULL);
2599 kernel_ops->init(logger);
2600
2601 /* Add the port bypass policies */
2602
2603 if (kernel_ops->v6holes != NULL) {
2604 /* may not return */
2605 kernel_ops->v6holes(logger);
2606 }
2607
2608 /* register SA types that we can negotiate */
2609 if (kernel_ops->pfkey_register != NULL)
2610 kernel_ops->pfkey_register();
2611
2612 enable_periodic_timer(EVENT_SHUNT_SCAN, kernel_scan_shunts,
2613 bare_shunt_interval);
2614
2615 dbg("kernel: setup kernel fd callback");
2616
2617 if (kernel_ops->async_fdp != NULL)
2618 /* Note: kernel_ops is const but pluto_event_add cannot know that */
2619 add_fd_read_event_handler(*kernel_ops->async_fdp, kernel_process_msg_cb,
2620 (void *)kernel_ops, "KERNEL_XRM_FD");
2621
2622 if (kernel_ops->route_fdp != NULL && *kernel_ops->route_fdp > NULL_FD) {
2623 add_fd_read_event_handler(*kernel_ops->route_fdp, kernel_process_msg_cb,
2624 (void *)kernel_ops, "KERNEL_ROUTE_FD");
2625 }
2626
2627 if (kernel_ops->process_queue != NULL) {
2628 /*
2629 * AA_2015 this is untested code. only for non xfrm ???
2630 * It seems in klips we should, besides kernel_process_msg,
2631 * call process_queue periodically. Does the order
2632 * matter?
2633 */
2634 enable_periodic_timer(EVENT_PROCESS_KERNEL_QUEUE,
2635 kernel_process_queue_cb,
2636 deltatime(KERNEL_PROCESS_Q_PERIOD));
2637 }
2638}
2639
2640void show_kernel_interface(struct show *s)
2641{
2642 if (kernel_ops != NULL) {
2643 show_comment(s, "using kernel interface: %s",
2644 kernel_ops->kern_name);
2645 }
2646}
2647
2648/*
2649 * see if the attached connection refers to an older state.
2650 * if it does, then initiate this state with the appropriate outgoing
2651 * references, such that we won't break any userland applications
2652 * that are using the conn with REFINFO.
2653 */
2654static void look_for_replacement_state(struct state *st)
2655{
2656 struct connection *c = st->st_connection;
2657 struct state *ost = state_with_serialno(c->newest_ipsec_sa);
2658
2659 if (DBGP(DBG_BASE)) {
2660 DBG_log("checking if this is a replacement state");
2661 DBG_log(" st=%p ost=%p st->serialno=#%lu ost->serialno=#%lu",
2662 st, ost, st->st_serialno,
2663 ost == NULL ? 0 : ost->st_serialno);
2664 }
2665
2666 if (ost != NULL && ost != st && ost->st_serialno != st->st_serialno) {
2667 /*
2668 * then there is an old state associated, and it is
2669 * different than the new one.
2670 */
2671 dbg("kernel: keeping ref_peer=%" PRIu32 " during rekey", ost->st_ref_peer);
2672 st->st_ref_peer = ost->st_ref_peer;
2673 }
2674}
2675
2676/*
2677 * Note: install_inbound_ipsec_sa is only used by the Responder.
2678 * The Responder will subsequently use install_ipsec_sa for the outbound.
2679 * The Initiator uses install_ipsec_sa to install both at once.
2680 */
2681 bool install_inbound_ipsec_sa(struct state *st)
2682{
2683 struct connection *const c = st->st_connection;
2684
2685 /*
2686 * If our peer has a fixed-address client, check if we already
2687 * have a route for that client that conflicts. We will take this
2688 * as proof that that route and the connections using it are
2689 * obsolete and should be eliminated. Interestingly, this is
2690 * the only case in which we can tell that a connection is obsolete.
2691 */
2692 passert(c->kind == CK_PERMANENT || c->kind == CK_INSTANCE);
2693 if (c->spd.that.has_client) {
2694 for (;; ) {
2695 struct spd_route *esr; /* value is ignored */
2696 struct connection *o = route_owner(c, &c->spd, &esr,
2697 NULL, NULL);
2698
2699 if (o == NULL || c == o)
2700 break; /* nobody interesting has a route */
2701
2702 /* note: we ignore the client addresses at this end */
2703 if (sameaddr(&o->spd.that.host_addr,
2704 &c->spd.that.host_addr) &&
2705 o->interface == c->interface)
2706 break; /* existing route is compatible */
2707
2708 if (kernel_ops->overlap_supported) {
2709 /*
2710 * Both are transport mode, allow overlapping.
2711 * [bart] not sure if this is actually
2712 * intended, but am leaving it in to make it
2713 * behave like before
2714 */
2715 if (!LIN(POLICY_TUNNEL, c->policy | o->policy))
2716 break;
2717
2718 /* Both declared that overlapping is OK. */
2719 if (LIN(POLICY_OVERLAPIP, c->policy & o->policy))
2720 break;
2721 }
2722
2723 address_buf b;
2724 connection_buf cib;
2725 log_state(RC_LOG_SERIOUS, st,
2726 "route to peer's client conflicts with "PRI_CONNECTION" %s; releasing old connection to free the route",
2727 pri_connection(o, &cib),
2728 str_address_sensitive(&o->spd.that.host_addr, &b));
2729 if (o->kind == CK_INSTANCE) {
2730 delete_connection(&o, /*relations?*/false);
2731 } else {
2732 release_connection(o, /*relations?*/false);
2733 }
2734 }
2735 }
2736
2737 dbg("kernel: install_inbound_ipsec_sa() checking if we can route");
2738 /* check that we will be able to route and eroute */
2739 switch (could_route(c, st->st_logger)) {
2740 case route_easy:
2741 case route_nearconflict:
2742 dbg("kernel: routing is easy, or has resolvable near-conflict");
2743 break;
2744
2745 case route_unnecessary:
2746 /*
2747 * in this situation, we should look and see if there is
2748 * a state that our connection references, that we are
2749 * in fact replacing.
2750 */
2751 break;
2752
2753 default:
2754 return false;
2755 }
2756
2757 look_for_replacement_state(st);
2758
2759 /*
2760 * we now have to set up the outgoing SA first, so that
2761 * we can refer to it in the incoming SA.
2762 */
2763 if (st->st_ref_peer == IPSEC_SAREF_NULL && !st->st_outbound_done) {
2764 dbg("kernel: installing outgoing SA now as ref_peer=%u", st->st_ref_peer);
2765 if (!setup_half_ipsec_sa(st, false)) {
2766 DBG_log("failed to install outgoing SA: %u",
2767 st->st_ref_peer);
2768 return false;
2769 }
2770
2771 st->st_outbound_done = true;
2772 }
2773 dbg("kernel: outgoing SA has ref_peer=%u", st->st_ref_peer);
2774
2775 /* (attempt to) actually set up the SAs */
2776
2777 return setup_half_ipsec_sa(st, true);
2778}
2779
2780/* Install a route and then a prospective shunt eroute or an SA group eroute.
2781 * Assumption: could_route gave a go-ahead.
2782 * Any SA Group must have already been created.
2783 * On failure, steps will be unwound.
2784 */
2785 bool route_and_eroute(struct connection *c,
2786 struct spd_route *sr,
2787 struct state *st/*can be NULL*/,
2788 struct logger *logger/*st or c */)
2789{
2790 selectors_buf sb;
2791 dbg("kernel: route_and_eroute() for %s; proto %d, and source port %d dest port %d sec_label",
2792 str_selectors(&sr->this.client, &sr->that.client, &sb),
2793 sr->this.protocol, sr->this.port, sr->that.port);
2794#if 0
2795 /* XXX: apparently not so */
2796 pexpect(sr->this.client.addr.ipproto == sr->this.protocol);
2797 pexpect(sr->that.client.addr.ipproto == sr->that.protocol);
2798 pexpect(sr->this.client.addr.hport == sr->this.port);
2799 pexpect(sr->that.client.addr.hport == sr->that.port);
2800 #endif
2801
2802 /* XXX: ... so make it so */
2803 update_selector_hport(&sr->this.client, sr->this.port);
2804 update_selector_hport(&sr->that.client, sr->that.port);
2805#if 0
2806 sr->this.client.addr.ipproto = sr->this.protocol;
2807 sr->that.client.addr.ipproto = sr->that.protocol;
2808#endif
2809
2810 struct spd_route *esr, *rosr;
2811 struct connection *ero;
2812 struct connection *ro = route_owner(c, sr, &rosr, &ero, &esr); /* who, if anyone, owns our eroute? */
2813
2814 dbg("kernel: route_and_eroute with c: %s (next: %s) ero:%s esr:{%p} ro:%s rosr:{%p} and state: #%lu",
2815 c->name,
2816 (c->policy_next ? c->policy_next->name : "none"),
2817 ero == NULL ? "null" : ero->name,
2818 esr,
2819 ro == NULL ? "null" : ro->name,
2820 rosr,
2821 st == NULL ? 0 : st->st_serialno);
2822
2823 /* look along the chain of policies for same one */
2824
2825 /* we should look for dest port as well? */
2826 /* ports are now switched to the ones in this.client / that.client ??????? */
2827 /* but port set is sr->this.port and sr.that.port ! */
2828 struct bare_shunt **bspp = ((ero == NULL) ? bare_shunt_ptr(&sr->this.client,
2829 &sr->that.client,
2830 sr->this.protocol,
2831 "route and eroute") :
2832 NULL);
2833
2834 /* install the eroute */
2835
2836 bool eroute_installed = false;
2837
2838 #ifdef IPSEC_CONNECTION_LIMIT
2839 bool new_eroute = false;
2840 #endif
2841
2842 passert(bspp == NULL || ero == NULL); /* only one non-NULL */
2843
2844 if (bspp != NULL || ero != NULL) {
2845 dbg("kernel: we are replacing an eroute");
2846 /* if no state provided, then install a shunt for later */
2847 if (st == NULL) {
2848 eroute_installed = shunt_policy(KP_REPLACE_OUTBOUND, c, sr,
2849 RT_ROUTED_PROSPECTIVE,
2850 "route_and_eroute() replace shunt",
2851 logger);
2852 } else {
2853 eroute_installed = sag_eroute(st, sr, KP_REPLACE_OUTBOUND,
2854 "route_and_eroute() replace sag");
2855 }
2856
2857 /* remember to free bspp if we make it out of here alive */
2858 } else {
2859 /* we're adding an eroute */
2860#ifdef IPSEC_CONNECTION_LIMIT
2861 if (num_ipsec_eroute == IPSEC_CONNECTION_LIMIT) {
2862 llog(RC_LOG_SERIOUS, logger,
2863 "Maximum number of IPsec connections reached (%d)",
2864 IPSEC_CONNECTION_LIMIT);
2865 return false;
2866 }
2867 new_eroute = true;
2868 #endif
2869
2870 /* if no state provided, then install a shunt for later */
2871 if (st == NULL) {
2872 eroute_installed = shunt_policy(KP_ADD_OUTBOUND, c, sr,
2873 RT_ROUTED_PROSPECTIVE,
2874 "route_and_eroute() add",
2875 logger);
2876 } else {
2877 eroute_installed = sag_eroute(st, sr, KP_ADD_OUTBOUND, "add");
2878 }
2879 }
2880
2881 /* notify the firewall of a new tunnel */
2882
2883 bool firewall_notified = false;
2884
2885 if (eroute_installed) {
2886 /*
2887 * do we have to notify the firewall?
2888 * Yes, if we are installing
2889 * a tunnel eroute and the firewall wasn't notified
2890 * for a previous tunnel with the same clients. Any Previous
2891 * tunnel would have to be for our connection, so the actual
2892 * test is simple.
2893 */
2894 firewall_notified = st == NULL || /* not a tunnel eroute */
2895 sr->eroute_owner != SOS_NOBODY || /* already notified */
2896 do_command(c, sr, "up", st, logger); /* go ahead and notify */
2897 }
2898
2899 /* install the route */
2900
2901 bool route_installed = false;
2902
2903 dbg("kernel: route_and_eroute: firewall_notified: %s",
2904 firewall_notified ? "true" : "false");
2905 if (!firewall_notified) {
2906 /* we're in trouble -- don't do routing */
2907 } else if (ro == NULL) {
2908 /* a new route: no deletion required, but preparation is */
2909 if (!do_command(c, sr, "prepare", st, logger))
2910 dbg("kernel: prepare command returned an error");
2911 route_installed = do_command(c, sr, "route", st, logger);
2912 if (!route_installed)
2913 dbg("kernel: route command returned an error");
2914 } else if (routed(sr->routing) ||
2915 routes_agree(ro, c)) {
2916 route_installed = true; /* nothing to be done */
2917 } else {
2918 /*
2919 * Some other connection must own the route
2920 * and the route must disagree. But since could_route
2921 * must have allowed our stealing it, we'll do so.
2922 *
2923 * A feature of LINUX allows us to install the new route
2924 * before deleting the old if the nexthops differ.
2925 * This reduces the "window of vulnerability" when packets
2926 * might flow in the clear.
2927 */
2928 if (sameaddr(&sr->this.host_nexthop,
2929 &esr->this.host_nexthop)) {
2930 if (!do_command(ro, sr, "unroute", st, logger)) {
2931 dbg("kernel: unroute command returned an error");
2932 }
2933 route_installed = do_command(c, sr, "route", st, logger);
2934 if (!route_installed)
2935 dbg("kernel: route command returned an error");
2936 } else {
2937 route_installed = do_command(c, sr, "route", st, logger);
2938 if (!route_installed)
2939 dbg("kernel: route command returned an error");
2940
2941 if (!do_command(ro, sr, "unroute", st, logger)) {
2942 dbg("kernel: unroute command returned an error");
2943 }
2944 }
2945
2946 /* record unrouting */
2947 if (route_installed) {
2948 do {
2949 dbg("kernel: installed route: ro name=%s, rosr->routing=%d", ro->name,
2950 rosr->routing);
2951 pexpect(!erouted(rosr->routing)); /* warn for now - requires fixing */
2952 rosr->routing = RT_UNROUTED;
2953
2954 /* no need to keep old value */
2955 ro = route_owner(c, sr, &rosr, NULL, NULL);
2956 } while (ro != NULL);
2957 }
2958 }
2959
2960 /* all done -- clean up */
2961 if (route_installed) {
2962 /* Success! */
2963
2964 if (bspp != NULL) {
2965 free_bare_shunt(bspp);
2966 } else if (ero != NULL && ero != c) {
2967 /* check if ero is an ancestor of c. */
2968 struct connection *ero2;
2969
2970 for (ero2 = c; ero2 != NULL && ero2 != c;
2971 ero2 = ero2->policy_next)
2972 ;
2973
2974 if (ero2 == NULL) {
2975 /*
2976 * By elimination, we must be eclipsing ero.
2977 * Checked above.
2978 */
2979 if (ero->spd.routing != RT_ROUTED_ECLIPSED) {
2980 ero->spd.routing = RT_ROUTED_ECLIPSED;
2981 eclipse_count++;
2982 }
2983 }
2984 }
2985
2986 if (st == NULL) {
2987 passert(sr->eroute_owner == SOS_NOBODY);
2988 sr->routing = RT_ROUTED_PROSPECTIVE;
2989 } else {
2990 sr->routing = RT_ROUTED_TUNNEL;
2991 connection_buf cib;
2992 dbg("kernel: route_and_eroute: instance "PRI_CONNECTION", setting eroute_owner {spd=%p,sr=%p} to #%lu (was #%lu) (newest_ipsec_sa=#%lu)",
2993 pri_connection(st->st_connection, &cib),
2994 &st->st_connection->spd, sr,
2995 st->st_serialno,
2996 sr->eroute_owner,
2997 st->st_connection->newest_ipsec_sa);
2998 sr->eroute_owner = st->st_serialno;
2999 /* clear host shunts that clash with freshly installed route */
3000 clear_narrow_holds(&sr->this.client, &sr->that.client,
3001 sr->this.protocol, logger);
3002 }
3003
3004#ifdef IPSEC_CONNECTION_LIMIT
3005 if (new_eroute) {
3006 num_ipsec_eroute++;
3007 llog(RC_COMMENT, logger,
3008 "%d IPsec connections are currently being managed",
3009 num_ipsec_eroute);
3010 }
3011#endif
3012
3013 return true;
3014 } else {
3015 /* Failure! Unwind our work. */
3016 if (firewall_notified && sr->eroute_owner == SOS_NOBODY) {
3017 if (!do_command(c, sr, "down", st, logger))
3018 dbg("kernel: down command returned an error");
3019 }
3020
3021 if (eroute_installed) {
3022 /*
3023 * Restore original eroute, if we can.
3024 * Since there is nothing much to be done if
3025 * the restoration fails, ignore success or failure.
3026 */
3027 if (bspp != NULL) {
3028 /*
3029 * Restore old bare_shunt.
3030 * I don't think that this case is very likely.
3031 * Normally a bare shunt would have been
3032 * assigned to a connection before we've
3033 * gotten this far.
3034 */
3035 struct bare_shunt *bs = *bspp;
3036
3037 ip_address dst = said_address(bs->said);
3038 if (!raw_policy(KP_REPLACE_OUTBOUND,
3039 &dst, /* should be useless */
3040 &bs->our_client,
3041 &dst, /* should be useless */
3042 &bs->peer_client,
3043 bs->said.spi, /* unused? network order */
3044 bs->said.spi, /* network order */
3045 sr->this.protocol, /* transport_proto */
3046 ET_INT,
3047 esp_transport_proto_info,
3048 deltatime(SHUNT_PATIENCE),
3049 calculate_sa_prio(c, false),
3050 NULL,
3051 0,
3052 /* bare shunt are not associated with any connection so no security label */
3053 null_shunk, logger,
3054 "%s() restore", __func__)) {
3055 llog(RC_LOG, logger,
3056 "raw_policy() in route_and_eroute() failed to restore/replace SA");
3057 }
3058 } else if (ero != NULL) {
3059 passert(esr != NULL);
3060 /* restore ero's former glory */
3061 if (esr->eroute_owner == SOS_NOBODY) {
3062 /* note: normal or eclipse case */
3063 if (!shunt_policy(KP_REPLACE_OUTBOUND,
3064 ero, esr, esr->routing,
3065 "route_and_eroute() restore",
3066 logger)) {
3067 llog(RC_LOG, logger,
3068 "shunt_policy() in route_and_eroute() failed restore/replace");
3069 }
3070 } else {
3071 /*
3072 * Try to find state that owned eroute.
3073 * Don't do anything if it cannot be
3074 * found.
3075 * This case isn't likely since we
3076 * don't run the updown script when
3077 * replacing a SA group with its
3078 * successor (for the same conn).
3079 */
3080 struct state *ost =
3081 state_with_serialno(
3082 esr->eroute_owner);
3083
3084 if (ost != NULL) {
3085 if (!sag_eroute(ost, esr,
3086 KP_REPLACE_OUTBOUND,
3087 "restore"))
3088 llog(RC_LOG, logger,
3089 "sag_eroute() in route_and_eroute() failed restore/replace");
3090 }
3091 }
3092 } else {
3093 /* there was no previous eroute: delete whatever we installed */
3094 if (st == NULL) {
3095 if (!shunt_policy(KP_DELETE_OUTBOUND, c, sr,
3096 sr->routing,
3097 "route_and_eroute() delete",
3098 logger)) {
3099 llog(RC_LOG, logger,
3100 "shunt_policy() in route_and_eroute() failed in !st case");
3101 }
3102 } else {
3103 if (!sag_eroute(st, sr,
3104 KP_DELETE_OUTBOUND,
3105 "delete")) {
3106 llog(RC_LOG, logger,
3107 "sag_eroute() in route_and_eroute() failed in st case for delete");
3108 }
3109 }
3110 }
3111 }
3112
3113 return false;
3114 }
3115}
3116
3117 bool install_ipsec_sa(struct state *st, bool inbound_also)
3118 {
3119 dbg("kernel: install_ipsec_sa() for #%lu: %s", st->st_serialno,
3120 inbound_also ? "inbound and outbound" : "outbound only");
3121
3122 enum routability rb = could_route(st->st_connection, st->st_logger);
3123
3124 switch (rb) {
3125 case route_easy:
3126 case route_unnecessary:
3127 case route_nearconflict:
3128 break;
3129
3130 default:
3131 return false;
3132 }
3133
3134 /* (attempt to) actually set up the SA group */
3135
3136 /* setup outgoing SA if we haven't already */
3137 if (!st->st_outbound_done) {
3138 if (!setup_half_ipsec_sa(st, false)) {
3139 return false;
3140 }
3141
3142 dbg("kernel: set up outgoing SA, ref=%u/%u", st->st_ref,
3143 st->st_ref_peer);
3144 st->st_outbound_done = true;
3145 }
3146
3147 /* now setup inbound SA */
3148 if (st->st_ref == IPSEC_SAREF_NULL && inbound_also) {
3149 if (!setup_half_ipsec_sa(st, true))
3150 return false;
3151
3152 dbg("kernel: set up incoming SA, ref=%u/%u", st->st_ref,
3153 st->st_ref_peer);
3154
3155 /*
3156 * We successfully installed an IPsec SA, meaning it is safe
3157 * to clear our revival back-off delay. This is based on the
3158 * assumption that an unwilling partner might complete an IKE
3159 * SA to us, but won't complete an IPsec SA to us.
3160 */
3161 st->st_connection->temp_vars.revive_delay = 0;
3162 }
3163
3164 if (rb == route_unnecessary)
3165 return true;
3166
3167 struct spd_route *sr = &st->st_connection->spd;
3168
3169 if (st->st_connection->remotepeertype == CISCO && sr->spd_next != NULL)
3170 sr = sr->spd_next;
3171
3172 /* for (sr = &st->st_connection->spd; sr != NULL; sr = sr->next) */
3173 struct connection *c = st->st_connection;
3174 if (c->ike_version == IKEv2 && c->spd.this.sec_label.len > 0) {
3175 dbg("kernel: %s() skipping route_and_eroute(st) as security label", __func__);
3176 } else {
3177 for (; sr != NULL; sr = sr->spd_next) {
3178 dbg("kernel: sr for #%lu: %s", st->st_serialno,
3179 enum_name(&routing_story, sr->routing));
3180
3181 /*
3182 * if the eroute owner is not us, then make it
3183 * us. See test co-terminal-02,
3184 * pluto-rekey-01, pluto-unit-02/oppo-twice
3185 */
3186 pexpect(sr->eroute_owner == SOS_NOBODY ||
3187 sr->routing >= RT_ROUTED_TUNNEL);
3189 if (sr->eroute_owner != st->st_serialno &&
3190 sr->routing != RT_UNROUTED_KEYED) {
3191 if (!route_and_eroute(st->st_connection, sr, st, st->st_logger)) {
3192 delete_ipsec_sa(st);
3193 /*
3194 * XXX go and unroute any SRs that were
3195 * successfully routed already.
3196 */
3197 return false;
3198 }
3199 }
3200 }
3201 }
3202
3203 /* XXX why is this needed? Skip the bogus original conn? */
3204 if (st->st_connection->remotepeertype == CISCO) {
3205 struct spd_route *srcisco = st->st_connection->spd.spd_next;
3206
3207 if (srcisco != NULL) {
3208 st->st_connection->spd.eroute_owner = srcisco->eroute_owner;
3209 st->st_connection->spd.routing = srcisco->routing;
3210 }
3211 }
3212
3213 if (inbound_also)
3214 linux_audit_conn(st, LAK_CHILD_START);
3215 return true;
3216}
3217
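/*
 * Ask the kernel to migrate an established IPsec SA to the state's
 * current endpoints (MOBIKE).  Only the XFRM stack is handled, and only
 * ESP SAs; the actual update is delegated to kernel_ops->migrate_sa().
 */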
3218bool migrate_ipsec_sa(struct state *st)
3219{
3220 switch (kernel_ops->type) {
3221 case USE_XFRM:
3222 /* support ah? if(!st->st_esp.present && !st->st_ah.present)) */
3223 if (!st->st_esp.present) {
3224 log_state(RC_LOG, st, "mobike SA migration only support ESP SA");
3225 return false0;
3226 }
3227
3228 if (!kernel_ops->migrate_sa(st))
3229 return false;
3230
3231 return true;
3232
3233 default:
3234 dbg("kernel: Unsupported kernel stack in migrate_ipsec_sa");
3235 return false;
3236 }
3237}
3238
3239/*
3240 * Delete an IPSEC SA.
3241 * We may not succeed, but we bull ahead anyway because
3242 * we cannot do anything better by recognizing failure.
3243 * This used to have a parameter bool inbound_only, but
3244 * the saref code changed to always install inbound before
3245 * outbound, so it was always false and has been removed.
3246 *
3247 */
3248void delete_ipsec_sa(struct state *st)
3249{
3250 /* XXX in IKEv2 we get a spurious call with a parent st :( */
3251 if (IS_CHILD_SA(st)) {
3252 if (st->st_esp.present || st->st_ah.present) {
3253 /* ESP or AH means this was an established IPsec SA */
3254 linux_audit_conn(st, LAK_CHILD_DESTROY);
3255 }
3256 } else {
3257 log_state(RC_LOG, st,
3258 "delete_ipsec_sa() called with (wrong?) parent state %s",
3259 st->st_state->name);
3260 }
3261
3262 switch (kernel_ops->type) {
3263 case USE_XFRM:
3264 {
3265 /*
3266 * If the state is the eroute owner, we must adjust
3267 * the routing for the connection.
3268 */
3269 struct connection *c = st->st_connection;
3270 struct spd_route *sr;
3271
3272 for (sr = &c->spd; sr; sr = sr->spd_next) {
3273 if (sr->eroute_owner == st->st_serialno &&
3274 sr->routing == RT_ROUTED_TUNNEL) {
3275 sr->eroute_owner = SOS_NOBODY;
3276
3277 /*
3278 * Routing should become
3279 * RT_ROUTED_FAILURE,
3280 * but if POLICY_FAIL_NONE, then we
3281 * just go right back to
3282 * RT_ROUTED_PROSPECTIVE as if no
3283 * failure happened.
3284 */
3285 sr->routing =
3286 (c->policy &
3287 POLICY_FAIL_MASK) ==
3288 POLICY_FAIL_NONE ?
3289 RT_ROUTED_PROSPECTIVE :
3290 RT_ROUTED_FAILURE;
3291
3292 if (sr == &c->spd &&
3293 c->remotepeertype == CISCO)
3294 continue;
3295
3296 (void) do_command(c, sr, "down", st, st->st_logger);
3297 if ((c->policy & POLICY_OPPORTUNISTIC) &&
3298 c->kind == CK_INSTANCE) {
3299 /*
3300 * in this case we get rid of
3301 * the IPSEC SA
3302 */
3303 unroute_connection(c);
3304 } else if ((c->policy & POLICY_DONT_REKEY) &&
3305 c->kind == CK_INSTANCE) {
3306 /*
3307 * in this special case,
3308 * even if the connection
3309 * is still alive (due to
3310 * an ISAKMP SA),
3311 * we get rid of routing.
3312 * Even though there is still
3313 * an eroute, the c->routing
3314 * setting will convince
3315 * unroute_connection to
3316 * delete it.
3317 * unroute_connection
3318 * would be upset
3319 * if c->routing ==
3320 * RT_ROUTED_TUNNEL
3321 */
3322 unroute_connection(c);
3323 } else {
3324 if (!shunt_policy(KP_REPLACE_OUTBOUND,
3325 c, sr, sr->routing,
3326 "delete_ipsec_sa() replace with shunt",
3327 st->st_logger)) {
3328 log_state(RC_LOG, st,
3329 "shunt_policy() failed replace with shunt in delete_ipsec_sa()");
3330 }
3331 }
3332 }
3333 }
3334 (void) teardown_half_ipsec_sa(st, false);
3335 }
3336 (void) teardown_half_ipsec_sa(st, true);
3337
3338 break;
3339 default:
3340 dbg("kernel: unknown kernel stack in delete_ipsec_sa");
3341 break;
3342 } /* switch kernel_ops->type */
3343}
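/*
 * A minimal stand-alone sketch (not part of kernel.c) of the routing
 * fallback chosen above when the eroute owner's SA is deleted: with
 * POLICY_FAIL_NONE the connection drops back to its prospective (trap)
 * route as if nothing failed, otherwise it moves to the failure route.
 * The enum values and policy bits below are simplified stand-ins for
 * the libreswan definitions, chosen only for illustration.
 */
#include <stdio.h>

enum routing { ROUTED_PROSPECTIVE, ROUTED_FAILURE };

#define FAIL_MASK 0x3u		/* stand-in for POLICY_FAIL_MASK */
#define FAIL_NONE 0x0u		/* stand-in for POLICY_FAIL_NONE */

static enum routing routing_after_delete(unsigned policy)
{
	/* no failure shunt requested: pretend nothing failed */
	return (policy & FAIL_MASK) == FAIL_NONE ?
		ROUTED_PROSPECTIVE : ROUTED_FAILURE;
}

int main(void)
{
	printf("fail=none -> %s\n",
	       routing_after_delete(0x0u) == ROUTED_PROSPECTIVE ? "prospective" : "failure");
	printf("fail=drop -> %s\n",
	       routing_after_delete(0x2u) == ROUTED_PROSPECTIVE ? "prospective" : "failure");
	return 0;
}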
3344
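/*
 * Report whether the eroute for this state has been idle since
 * 'since_when'.  When the kernel backend cannot answer, the code below
 * conservatively reports "not idle" rather than guessing that the SA
 * is unused.
 */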
3345bool was_eroute_idle(struct state *st, deltatime_t since_when)
3346{
3347 if (kernel_ops->eroute_idle != NULL)
3348 return kernel_ops->eroute_idle(st, since_when);
3349
3350 /* it is never idle if we can't check */
3351 return false;
3352}
3353
3354/*
3355 * get information about a given sa - needs merging with was_eroute_idle
3356 *
3357 * Note: this mutates *st.
3358 */
3359bool get_sa_info(struct state *st, bool inbound, deltatime_t *ago /* OUTPUT */)
3360{
3361 struct connection *const c = st->st_connection;
3362
3363 if (kernel_ops->get_sa == NULL || (!st->st_esp.present && !st->st_ah.present)) {
3364 return false;
3365 }
3366
3367 const struct ip_protocol *proto;
3368 struct ipsec_proto_info *p2;
3369
3370 if (st->st_esp.present) {
3371 proto = &ip_protocol_esp;
3372 p2 = &st->st_esp;
3373 } else if (st->st_ah.present) {
3374 proto = &ip_protocol_ah;
3375 p2 = &st->st_ah;
3376 } else {
3377 return false;
3378 }
3379
3380 /*
3381 * If we were redirected (using the REDIRECT mechanism),
3382 * change spd.that.host_addr temporarily; we reset it back
3383 * later.
3384 */
3385 bool redirected = false;
3386 ip_address tmp_host_addr = unset_address;
3387 unsigned tmp_host_port = 0;
3388 if (!endpoint_address_eq_address(st->st_remote_endpoint, c->spd.that.host_addr) &&
3389 address_is_specified(c->temp_vars.redirect_ip)) {
3390 redirected = true;
3391 tmp_host_addr = c->spd.that.host_addr;
3392 tmp_host_port = c->spd.that.host_port; /* XXX: needed? */
3393 c->spd.that.host_addr = endpoint_address(st->st_remote_endpoint);
3394 c->spd.that.host_port = endpoint_hport(st->st_remote_endpoint);
3395 }
3396
3397 const ip_address *src, *dst;
3398 ipsec_spi_t spi;
3399 if (inbound) {
3400 src = &c->spd.that.host_addr;
3401 dst = &c->spd.this.host_addr;
3402 spi = p2->our_spi;
3403 } else {
3404 src = &c->spd.this.host_addr;
3405 dst = &c->spd.that.host_addr;
3406 spi = p2->attrs.spi;
3407 }
3408
3409 said_buf sb;
3410 struct kernel_sa sa = {
3411 .spi = spi,
3412 .proto = proto,
3413 .src.address = src,
3414 .dst.address = dst,
3415 .story = said_str(*dst, proto, spi, &sb),
3416 };
3417
3418 dbg("kernel: get_sa_info %s", sa.story){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: get_sa_info %s", sa.story); } }
;
3419
3420 uint64_t bytes;
3421 uint64_t add_time;
3422
3423 if (!kernel_ops->get_sa(&sa, &bytes, &add_time, st->st_logger))
3424 return false;
3425
3426 p2->add_time = add_time;
3427
3428 /* field has been set? */
3429 passert(!is_monotime_epoch(p2->our_lastused));
3430 passert(!is_monotime_epoch(p2->peer_lastused));
3431
3432 if (inbound) {
3433 if (bytes > p2->our_bytes) {
3434 p2->our_bytes = bytes;
3435 p2->our_lastused = mononow();
3436 }
3437 if (ago != NULL)
3438 *ago = monotimediff(mononow(), p2->our_lastused);
3439 } else {
3440 if (bytes > p2->peer_bytes) {
3441 p2->peer_bytes = bytes;
3442 p2->peer_lastused = mononow();
3443 }
3444 if (ago != NULL)
3445 *ago = monotimediff(mononow(), p2->peer_lastused);
3446 }
3447
3448 if (redirected) {
3449 c->spd.that.host_addr = tmp_host_addr;
3450 c->spd.that.host_port = tmp_host_port;
3451 }
3452
3453 return true;
3454}
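/*
 * A minimal stand-alone sketch (not part of kernel.c) of the
 * bookkeeping get_sa_info() performs with the kernel's byte counter: a
 * growing count bumps a "last used" timestamp, and the reported idle
 * time is simply now minus that timestamp.  Plain time_t arithmetic is
 * used here instead of libreswan's monotime/deltatime helpers, purely
 * for illustration.
 */
#include <stdio.h>
#include <time.h>

struct sa_usage {
	unsigned long long bytes;	/* highest byte count seen so far */
	time_t last_used;		/* when that count last increased */
};

/* fold a fresh kernel byte count into the record; return idle seconds */
static double note_traffic(struct sa_usage *u, unsigned long long bytes, time_t now)
{
	if (bytes > u->bytes) {
		u->bytes = bytes;
		u->last_used = now;
	}
	return difftime(now, u->last_used);
}

int main(void)
{
	time_t now = time(NULL);
	struct sa_usage u = { .bytes = 0, .last_used = now };

	printf("idle %.0fs\n", note_traffic(&u, 1000, now));		/* traffic seen: 0s idle */
	printf("idle %.0fs\n", note_traffic(&u, 1000, now + 30));	/* no growth: 30s idle */
	return 0;
}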
3455
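/*
 * The negotiation is failing ("oe-failing" below) but the connection's
 * kernel policy should not simply vanish: detach ("orphan") it into the
 * bare_shunts table so it survives the dying state and, when the
 * failure shunt differs from the negotiation shunt, replace the kernel
 * policy with the failure shunt.
 */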
3456bool orphan_holdpass(const struct connection *c, struct spd_route *sr,
3457 int transport_proto, ipsec_spi_t failure_shunt,
3458 struct logger *logger)
3459{
3460 enum routing_t ro = sr->routing, /* routing, old */
3461 rn = ro; /* routing, new */
3462 ipsec_spi_t negotiation_shunt = (c->policy & POLICY_NEGO_PASS) ? SPI_PASS : SPI_DROP;
3463
3464 if (negotiation_shunt != failure_shunt) {
3465 dbg("kernel: failureshunt != negotiationshunt, needs replacing");
3466 } else {
3467 dbg("kernel: failureshunt == negotiationshunt, no replace needed");
3468 }
3469
3470 dbg("kernel: orphan_holdpass() called for %s with transport_proto '%d' and sport %d and dport %d",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpass() called for %s with transport_proto '%d' and sport %d and dport %d"
, c->name, transport_proto, sr->this.port, sr->that.
port); } }
3471 c->name, transport_proto, sr->this.port, sr->that.port){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpass() called for %s with transport_proto '%d' and sport %d and dport %d"
, c->name, transport_proto, sr->this.port, sr->that.
port); } }
;
3472
3473 passert(LHAS(LELEM(CK_PERMANENT) | LELEM(CK_INSTANCE) |({ _Bool assertion__ = (((((lset_t)1 << (CK_PERMANENT))
| ((lset_t)1 << (CK_INSTANCE)) | ((lset_t)1 << (
CK_GOING_AWAY))) & ((lset_t)1 << (c->kind))) != (
(lset_t)0)); if (!assertion__) { where_t here = ({ static const
struct where here = { .func = __func__, .file = "programs/pluto/kernel.c"
, .line = 3474, }; &here; }); const struct logger *logger_
= &failsafe_logger; llog_passert(logger_, here, "%s", "(((((lset_t)1 << (CK_PERMANENT)) | ((lset_t)1 << (CK_INSTANCE)) | ((lset_t)1 << (CK_GOING_AWAY))) & ((lset_t)1 << (c->kind))) != ((lset_t)0))"
); } (void) 1; })
3474 LELEM(CK_GOING_AWAY), c->kind))({ _Bool assertion__ = (((((lset_t)1 << (CK_PERMANENT))
| ((lset_t)1 << (CK_INSTANCE)) | ((lset_t)1 << (
CK_GOING_AWAY))) & ((lset_t)1 << (c->kind))) != (
(lset_t)0)); if (!assertion__) { where_t here = ({ static const
struct where here = { .func = __func__, .file = "programs/pluto/kernel.c"
, .line = 3474, }; &here; }); const struct logger *logger_
= &failsafe_logger; llog_passert(logger_, here, "%s", "(((((lset_t)1 << (CK_PERMANENT)) | ((lset_t)1 << (CK_INSTANCE)) | ((lset_t)1 << (CK_GOING_AWAY))) & ((lset_t)1 << (c->kind))) != ((lset_t)0))"
); } (void) 1; })
;
3475
3476 switch (ro) {
3477 case RT_UNROUTED_HOLD:
3478 rn = RT_UNROUTED;
3479 dbg("kernel: orphan_holdpass unrouted: hold -> pass"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpass unrouted: hold -> pass"
); } }
;
3480 break;
3481 case RT_UNROUTED:
3482 rn = RT_UNROUTED_HOLD;
3483 dbg("kernel: orphan_holdpass unrouted: pass -> hold"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpass unrouted: pass -> hold"
); } }
;
3484 break;
3485 case RT_ROUTED_HOLD:
3486 rn = RT_ROUTED_PROSPECTIVE;
3487 dbg("kernel: orphan_holdpass routed: hold -> trap (?)"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpass routed: hold -> trap (?)"
); } }
;
3488 break;
3489 default:
3490 dbg("kernel: no routing change needed for ro=%s - negotiation shunt matched failure shunt?",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: no routing change needed for ro=%s - negotiation shunt matched failure shunt?"
, enum_name(&routing_story, ro)); } }
3491 enum_name(&routing_story, ro)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: no routing change needed for ro=%s - negotiation shunt matched failure shunt?"
, enum_name(&routing_story, ro)); } }
;
3492 break;
3493 }
3494
3495 dbg("kernel: orphaning holdpass for connection '%s', routing was %s, needs to be %s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphaning holdpass for connection '%s', routing was %s, needs to be %s"
, c->name, enum_name(&routing_story, ro), enum_name(&
routing_story, rn)); } }
3496 c->name,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphaning holdpass for connection '%s', routing was %s, needs to be %s"
, c->name, enum_name(&routing_story, ro), enum_name(&
routing_story, rn)); } }
3497 enum_name(&routing_story, ro),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphaning holdpass for connection '%s', routing was %s, needs to be %s"
, c->name, enum_name(&routing_story, ro), enum_name(&
routing_story, rn)); } }
3498 enum_name(&routing_story, rn)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphaning holdpass for connection '%s', routing was %s, needs to be %s"
, c->name, enum_name(&routing_story, ro), enum_name(&
routing_story, rn)); } }
;
3499
3500 {
3501 /* are we replacing a bare shunt ? */
3502 update_selector_hport(&sr->this.client, sr->this.port);
3503 update_selector_hport(&sr->that.client, sr->that.port);
3504 struct bare_shunt **old = bare_shunt_ptr(&sr->this.client,
3505 &sr->that.client,
3506 sr->this.protocol,
3507 "orphan holdpass");
3508
3509 if (old != NULL) {
3510 free_bare_shunt(old);
3511 }
3512 }
3513
3514 /*
3515 * create the bare shunt and update kernel policy if needed.
3516 */
3517 {
3518 /*
3519 * XXX: merge this add bare shunt code with that
3520 * following the raw_policy() call!?!
3521 */
3522 struct bare_shunt *bs = alloc_thing(struct bare_shunt, "orphan shunt");
3523
3524 bs->why = "oe-failing";
3525 bs->our_client = sr->this.client;
3526 bs->peer_client = sr->that.client;
3527 bs->transport_proto = sr->this.protocol;
3528 bs->policy_prio = BOTTOM_PRIO;
3529
3530 bs->said = said_from_address_protocol_spi(selector_type(&sr->this.client)->address.any,
3531 &ip_protocol_internal,
3532 htonl(negotiation_shunt));
3533
3534 bs->count = 0;
3535 bs->last_activity = mononow();
3536 if (strstr(c->name, "/32") != NULL || strstr(c->name, "/128") != NULL) {
3537 bs->from_serialno = c->serialno;
3538 }
3539
3540 bs->next = bare_shunts;
3541 bare_shunts = bs;
3542 dbg_bare_shunt("add", bs);
3543
3544 /* update kernel policy if needed */
3545 /* This really causes the name to remain "oe-failing"; we should be able to update only the name of the shunt */
3546 if (negotiation_shunt != failure_shunt) {
3547
3548 dbg("kernel: replacing negotiation_shunt with failure_shunt");
3549
3550 /* fudge up parameter list */
3551 const ip_address *src_address = &sr->this.host_addr;
3552 const ip_address *dst_address = &sr->that.host_addr;
3553 policy_prio_t policy_prio = bs->policy_prio; /* of replacing shunt*/
3554 ipsec_spi_t cur_shunt_spi = negotiation_shunt; /* in host order! */
3555 ipsec_spi_t new_shunt_spi = failure_shunt; /* in host order! */
3556 int transport_proto = bs->transport_proto;
3557 const char *why = "oe-failed";
3558
3559 /* fudge up replace_bare_shunt() */
3560 const struct ip_info *afi = address_type(src_address);
3561 passert(afi == address_type(dst_address));
3562 const ip_protocol *protocol = protocol_by_ipproto(transport_proto);
3563 /* ports? assumed wide? */
3564 ip_selector src = selector_from_address_protocol(*src_address, protocol);
3565 ip_selector dst = selector_from_address_protocol(*dst_address, protocol);
3566
3567 selectors_buf sb;
3568 dbg("kernel: replace bare shunt %s for %s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: replace bare shunt %s for %s", str_selectors
(&src, &dst, &sb), why); } }
3569 str_selectors(&src, &dst, &sb), why){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: replace bare shunt %s for %s", str_selectors
(&src, &dst, &sb), why); } }
;
3570
3571 /*
3572 * ??? this comment might be obsolete.
3573 *
3574 * If the transport protocol is not the
3575 * wildcard (0), then we need to look for a
3576 * host<->host shunt, and replace that with
3577 * the shunt spi, and then we add a %HOLD for
3578 * what was there before.
3579 *
3580 * This is at odds with !repl, which should
3581 * delete things.
3582 */
3583
3584 const ip_address null_host = afi->address.any;
3585 bool ok = raw_policy(KP_REPLACE_OUTBOUND,
3586 &null_host, &src, &null_host, &dst,
3587 htonl(cur_shunt_spi), htonl(new_shunt_spi),
3588 transport_proto, ET_INT,
3589 esp_transport_proto_info,
3590 deltatime(SHUNT_PATIENCE),
3591 0, /* we don't know connection for priority yet */
3592 NULL, /* sa_marks */
3593 0 /* xfrm interface id */,
3594 null_shunk, logger,
3595 "%s() %s", __func__, why);
3596 if (!ok) {
3597 llog(RC_LOG, logger,
3598 "replace kernel shunt %s failed - deleting from pluto shunt table",
3599 str_selectors_sensitive(&src, &dst, &sb));
3600 }
3601
3602 /*
3603 * We can have proto mismatching acquires with
3604 * xfrm - this is a bad workaround.
3605 *
3606 * ??? what is the nature of those mismatching
3607 * acquires?
3608 *
3609 * XXX: for instance, when whack initiates an
3610 * OE connection. There is no kernel-acquire
3611 * shunt to remove.
3612 *
3613 * XXX: see above, this code is looking for
3614 * and fiddling with the shunt only just added
3615 * above?
3616 */
3617 struct bare_shunt **bs_pp = bare_shunt_ptr(&src, &dst, transport_proto, why);
3618 /* passert(bs_pp != NULL); */
3619 if (bs_pp == NULL) {
3620 selectors_buf sb;
3621 llog(RC_LOG, logger,
3622 "can't find expected bare shunt to %s: %s",
3623 ok ? "replace" : "delete",
3624 str_selectors_sensitive(&src, &dst, &sb));
3625 } else if (ok) {
3626 /*
3627 * change over to new bare eroute
3628 * ours, peers, transport_proto are
3629 * the same.
3630 */
3631 struct bare_shunt *bs = *bs_pp;
3632 bs->why = why;
3633 bs->policy_prio = policy_prio;
3634 bs->said = said_from_address_protocol_spi(null_host,
3635 &ip_protocol_internal,
3636 htonl(new_shunt_spi));
3637 bs->count = 0;
3638 bs->last_activity = mononow();
3639 dbg_bare_shunt("replace", bs);
3640 } else {
3641 llog(RC_LOG, logger,
3642 "assign_holdpass() failed to update shunt policy");
3643 free_bare_shunt(bs_pp);
3644 }
3645 } else {
3646 dbg("kernel: No need to replace negotiation_shunt with failure_shunt - they are the same"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: No need to replace negotiation_shunt with failure_shunt - they are the same"
); } }
;
3647 }
3648 }
3649
3650 /* change routing so we don't get cleared out when state/connection dies */
3651 sr->routing = rn;
3652 dbg("kernel: orphan_holdpas() done - returning success"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX)
)))) { DBG_log("kernel: orphan_holdpas() done - returning success"
); } }
;
3653 return true1;
3654}
3655
3656static void expire_bare_shunts(struct logger *logger, bool all)
3657{
3658 dbg("kernel: checking for aged bare shunts from shunt table to expire");
3659 for (struct bare_shunt **bspp = &bare_shunts; *bspp != NULL; ) {
3660 struct bare_shunt *bsp = *bspp;
3661 time_t age = deltasecs(monotimediff(mononow(), bsp->last_activity));
3662
3663 if (age > deltasecs(pluto_shunt_lifetime) || all) {
3664 dbg_bare_shunt("expiring old", bsp);
3665 if (co_serial_is_set(bsp->from_serialno)) {
3666 struct connection *c = connection_by_serialno(bsp->from_serialno);
3667 if (c != NULL) {
3668 if (!shunt_policy(KP_ADD_OUTBOUND, c, &c->spd,
3669 RT_ROUTED_PROSPECTIVE,
3670 "expire_bare_shunts() add",
3671 logger)) {
3672 llog(RC_LOG, logger,
3673 "trap shunt install failed ");
3674 }
3675 }
3676 }
3677 ip_address our_addr = selector_prefix(bsp->our_client);
3678 ip_address peer_addr = selector_prefix(bsp->peer_client);
3679 bool skip_xfrm_policy_delete = co_serial_is_set(bsp->from_serialno);
3680 if (!delete_bare_shunt(&our_addr, &peer_addr,
3681 bsp->transport_proto,
3682 ntohl(bsp->said.spi),
3683 skip_xfrm_policy_delete,
3684 "expire_bare_shunts()", logger)) {
3685 llog(RC_LOG_SERIOUS, logger,
3686 "failed to delete bare shunt");
3687 }
3688 passert(bsp != *bspp);
3689 } else {
3690 dbg_bare_shunt("keeping recent", bsp);
3691 bspp = &bsp->next;
3692 }
3693 }
3694}
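/*
 * A minimal stand-alone sketch (not part of kernel.c) of the
 * list-walking idiom used by expire_bare_shunts() above: keeping a
 * pointer to the "next" field (a struct node **) lets an entry be
 * unlinked mid-walk without losing the iteration position, and the
 * cursor only advances when the current entry is kept.  Here the unlink
 * is done inline, whereas the real code delegates it to
 * delete_bare_shunt() and then asserts that *bspp changed.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int age;
	struct node *next;
};

static void prune(struct node **head, int max_age)
{
	for (struct node **pp = head; *pp != NULL; ) {
		struct node *n = *pp;
		if (n->age > max_age) {
			*pp = n->next;	/* unlink; *pp now points at the successor */
			free(n);
		} else {
			pp = &n->next;	/* keep; advance the cursor */
		}
	}
}

int main(void)
{
	struct node *head = NULL;
	for (int age = 5; age >= 1; age--) {
		struct node *n = malloc(sizeof(*n));
		n->age = age;
		n->next = head;
		head = n;
	}
	prune(&head, 3);
	for (struct node *n = head; n != NULL; n = n->next)
		printf("kept age %d\n", n->age);	/* prints ages 1, 2, 3 */
	return 0;
}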
3695
3696static void kernel_scan_shunts(struct logger *logger)
3697{
3698 expire_bare_shunts(logger, false/*not-all*/);
3699}
3700
3701void shutdown_kernel(struct logger *logger)
3702{
3703
3704 if (kernel_ops->shutdown != NULL)
3705 kernel_ops->shutdown(logger);
3706 expire_bare_shunts(logger, true/*all*/);
3707}