File: programs/pluto/connections.c
Warning: line 896, column 9: Value stored to 'err' during its initialization is never read
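The diagnostic points at a dead store: the value assigned to 'err' in its declaration is overwritten before anything reads it. A minimal stand-alone sketch of the pattern, using made-up helper names rather than code from this file:

    err_t err = parse_key(input);    /* this result is never examined ... */
    err = unpack_key(input, &key);   /* ... because 'err' is overwritten here */
    if (err != NULL)
            return -1;               /* only the second call's error is checked */

In the listing below the first store comes from ttodatav() on line 896 and the overwrite from unpack_pubkey_content() on line 903, so any error reported by ttodatav() is silently dropped.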
1 | /* information about connections between hosts and clients |
2 | * |
3 | * Copyright (C) 1998-2002,2010,2013,2018 D. Hugh Redelmeier <hugh@mimosa.com> |
4 | * Copyright (C) 2003-2008 Michael Richardson <mcr@xelerance.com> |
5 | * Copyright (C) 2003-2011 Paul Wouters <paul@xelerance.com> |
6 | * Copyright (C) 2008-2009 David McCullough <david_mccullough@securecomputing.com> |
7 | * Copyright (C) 2009-2011 Avesh Agarwal <avagarwa@redhat.com> |
8 | * Copyright (C) 2010 Bart Trojanowski <bart@jukie.net> |
9 | * Copyright (C) 2010 Shinichi Furuso <Shinichi.Furuso@jp.sony.com> |
10 | * Copyright (C) 2010,2013 Tuomo Soini <tis@foobar.fi> |
11 | * Copyright (C) 2012-2017 Paul Wouters <paul@libreswan.org> |
12 | * Copyright (C) 2012 Philippe Vouters <Philippe.Vouters@laposte.net> |
13 | * Copyright (C) 2012 Bram <bram-bcrafjna-erqzvar@spam.wizbit.be> |
14 | * Copyright (C) 2013 Kim B. Heino <b@bbbs.net> |
15 | * Copyright (C) 2013,2017 Antony Antony <antony@phenome.org> |
16 | * Copyright (C) 2013,2018 Matt Rogers <mrogers@redhat.com> |
17 | * Copyright (C) 2013 Florian Weimer <fweimer@redhat.com> |
18 | * Copyright (C) 2015-2020 Paul Wouters <pwouters@redhat.com> |
19 | * Copyright (C) 2016-2020 Andrew Cagney <cagney@gnu.org> |
20 | * Copyright (C) 2017 Mayank Totale <mtotale@gmail.com> |
21 | * |
22 | * This program is free software; you can redistribute it and/or modify it |
23 | * under the terms of the GNU General Public License as published by the |
24 | * Free Software Foundation; either version 2 of the License, or (at your |
25 | * option) any later version. See <https://www.gnu.org/licenses/gpl2.txt>. |
26 | * |
27 | * This program is distributed in the hope that it will be useful, but |
28 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
29 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
30 | * for more details. |
31 | */ |
32 | |
33 | #include <string.h> |
34 | #include <stdio.h> |
35 | #include <stddef.h> |
36 | #include <stdlib.h> |
37 | #include <unistd.h> |
38 | #include <netinet/in.h> |
39 | #include <sys/socket.h> |
40 | #include <sys/stat.h> |
41 | #include <arpa/inet.h> |
42 | #include <resolv.h> |
43 | #include <errno.h> |
44 | #include <limits.h> |
45 | |
46 | #include "sysdep.h" |
47 | #include "constants.h" |
48 | #include "lswalloc.h" |
49 | #include "lswconf.h" |
50 | #include "id.h" |
51 | #include "x509.h" |
52 | #include "certs.h" |
53 | #include "secrets.h" |
54 | |
55 | #include "defs.h" |
56 | #include "connections.h" /* needs id.h */ |
57 | #include "connection_db.h" |
58 | #include "pending.h" |
59 | #include "foodgroups.h" |
60 | #include "packet.h" |
61 | #include "demux.h" /* needs packet.h */ |
62 | #include "state.h" |
63 | #include "timer.h" |
64 | #include "ipsec_doi.h" /* needs demux.h and state.h */ |
65 | #include "server.h" |
66 | #include "kernel.h" /* needs connections.h */ |
67 | #include "log.h" |
68 | #include "keys.h" |
69 | #include "whack.h" |
70 | #include "ike_alg.h" |
71 | #include "kernel_alg.h" |
72 | #include "plutoalg.h" |
73 | #include "ikev1_xauth.h" |
74 | #include "addresspool.h" |
75 | #include "nat_traversal.h" |
76 | #include "pluto_x509.h" |
77 | #include "nss_cert_verify.h" /* for cert_VerifySubjectAltName() */ |
78 | #include "nss_cert_load.h" |
79 | #include "ikev2.h" |
80 | #include "virtual_ip.h" /* needs connections.h */ |
81 | #include "host_pair.h" |
82 | #include "lswfips.h" |
83 | #include "crypto.h" |
84 | #include "kernel_xfrm.h" |
85 | #include "ip_address.h" |
86 | #include "ip_info.h" |
87 | #include "keyhi.h" /* for SECKEY_DestroyPublicKey */ |
88 | #include "state_db.h" |
89 | # include "kernel_xfrm_interface.h" |
90 | #include "iface.h" |
91 | #include "ip_selector.h" |
92 | #include "labeled_ipsec.h" /* for vet_seclabel() */ |
93 | #include "orient.h" |
94 | |
95 | #define MINIMUM_IPSEC_SA_RANDOM_MARK 65536 |
96 | static uint32_t global_marks = MINIMUM_IPSEC_SA_RANDOM_MARK; |
97 | |
98 | static bool idr_wildmatch(const struct end *this, const struct id *b, struct logger *logger); |
99 | |
100 | /* |
101 | * Find a connection by name. |
102 | * |
103 | * no_inst: don't accept a CK_INSTANCE. |
104 | */ |
105 | |
106 | struct connection *conn_by_name(const char *nm, bool no_inst) |
107 | { |
108 | dbg("FOR_EACH_CONNECTION_... in %s", __func__); |
109 | struct connection_query cq = { .where = HERE, .c = NULL, }; |
110 | while (new2old_connection(&cq)) { |
111 | struct connection *c = cq.c; |
112 | if (no_inst && c->kind == CK_INSTANCE) { |
113 | continue; |
114 | } |
115 | if (!streq(c->name, nm)) { |
116 | continue; |
117 | } |
118 | return c; |
119 | } |
120 | return NULL; |
121 | } |
122 | |
123 | void release_connection(struct connection *c, bool relations) |
124 | { |
125 | pexpect(c->kind != CK_INSTANCE); |
126 | flush_pending_by_connection(c); |
127 | delete_states_by_connection(c, relations); |
128 | unroute_connection(c); |
129 | } |
130 | |
131 | /* Delete a connection */ |
132 | static void delete_end(struct end *e) |
133 | { |
134 | free_id_content(&e->id); |
135 | |
136 | if (e->cert.nss_cert != NULL) |
137 | CERT_DestroyCertificate(e->cert.nss_cert); |
138 | |
139 | free_chunk_content(&e->ca); |
140 | pfreeany(e->updown); |
141 | pfreeany(e->host_addr_name); |
142 | pfreeany(e->xauth_password); |
143 | pfreeany(e->xauth_username); |
144 | pfreeany(e->ckaid); |
145 | free_chunk_content(&e->sec_label); |
146 | } |
147 | |
148 | static void delete_sr(struct spd_route *sr) |
149 | { |
150 | delete_end(&sr->this); |
151 | delete_end(&sr->that); |
152 | } |
153 | |
154 | /* |
155 | * delete_connection -- removes a connection by pointer |
156 | * |
157 | * @c - the connection pointer |
158 | * @relations - whether to delete any instances as well. |
159 | * @connection_valid - apply sanity checks |
160 | * |
161 | */ |
162 | |
163 | static void discard_connection(struct connection **cp, bool connection_valid); |
164 | |
165 | void delete_connection(struct connection **cp, bool relations) |
166 | { |
167 | struct connection *c = *cp; |
168 | *cp = NULL; |
169 | |
170 | /* |
171 | * Must be careful to avoid circularity: |
172 | * we mark c as going away so it won't get deleted recursively. |
173 | */ |
174 | passert(c->kind != CK_GOING_AWAY); |
175 | if (c->kind == CK_INSTANCE) { |
176 | if ((c->policy & POLICY_OPPORTUNISTIC) == LEMPTY) { |
177 | address_buf b; |
178 | llog(RC_LOG, c->logger, |
179 | "deleting connection instance with peer %s {isakmp=#%lu/ipsec=#%lu}", |
180 | str_address_sensitive(&c->spd.that.host_addr, &b), |
181 | c->newest_ike_sa, c->newest_ipsec_sa); |
182 | } |
183 | c->kind = CK_GOING_AWAY; |
184 | if (c->pool != NULL) { |
185 | free_that_address_lease(c); |
186 | } |
187 | } |
188 | release_connection(c, relations); |
189 | discard_connection(&c, true/*connection_valid*/); |
190 | } |
191 | |
192 | static void discard_connection(struct connection **cp, bool connection_valid) |
193 | { |
194 | struct connection *c = *cp; |
195 | *cp = NULL; |
196 | |
197 | if (c->kind == CK_GROUP) |
198 | delete_group(c); |
199 | |
200 | if (c->pool != NULL) |
201 | unreference_addresspool(c); |
202 | |
203 | if (IS_XFRMI && c->xfrmi != NULL) |
204 | unreference_xfrmi(c); |
205 | |
206 | struct logger *connection_logger = clone_logger(c->logger, HERE); |
207 | |
208 | /* find and delete c from the host pair list */ |
209 | host_pair_remove_connection(c, connection_valid); |
210 | |
211 | flush_revival(c); |
212 | |
213 | pfreeany(c->name); |
214 | pfreeany(c->foodgroup); |
215 | pfreeany(c->connalias); |
216 | pfreeany(c->vti_iface); |
217 | pfreeany(c->modecfg_dns); |
218 | pfreeany(c->modecfg_domains); |
219 | pfreeany(c->modecfg_banner); |
220 | pfreeany(c->dnshostname); |
221 | pfreeany(c->redirect_to); |
222 | pfreeany(c->accept_redirect_to); |
223 | free_logger(&c->logger, HERE); |
224 | |
225 | /* deal with top spd_route and then the rest */ |
226 | |
227 | passert(c->spd.this.virt == NULL); |
228 | |
229 | virtual_ip_delref(&c->spd.this.virt, HERE); |
230 | virtual_ip_delref(&c->spd.that.virt, HERE); |
231 | |
232 | struct spd_route *sr = c->spd.spd_next; |
233 | |
234 | delete_sr(&c->spd); |
235 | |
236 | while (sr != NULL) { |
237 | struct spd_route *next_sr = sr->spd_next; |
238 | |
239 | passert(sr->this.virt == NULL); |
240 | passert(sr->that.virt == NULL); |
241 | delete_sr(sr); |
242 | /* ??? should we: pfree(sr); */ |
243 | sr = next_sr; |
244 | } |
245 | |
246 | proposals_delref(&c->ike_proposals.p); |
247 | proposals_delref(&c->child_proposals.p); |
248 | |
249 | free_ikev2_proposals(&c->v2_ike_proposals); |
250 | free_ikev2_proposals(&c->v2_ike_auth_child_proposals); |
251 | free_ikev2_proposals(&c->v2_create_child_proposals); |
252 | c->v2_create_child_proposals_default_dh = NULL; /* static pointer */ |
253 | |
254 | remove_connection_from_db(c); |
255 | |
256 | if (c->root_config != NULL) { |
257 | passert(co_serial_is_unset(c->serial_from)); |
258 | free_chunk_content(&c->root_config->sec_label); |
259 | pfree(c->root_config); |
260 | } |
261 | |
262 | pfree(c); |
263 | free_logger(&connection_logger, HERE); |
264 | } |
265 | |
266 | int foreach_connection_by_alias(const char *alias, |
267 | int (*f)(struct connection *c, |
268 | void *arg, struct logger *logger), |
269 | void *arg, struct logger *logger) |
270 | { |
271 | int count = 0; |
272 | |
273 | struct connection_query cq = { .where = HERE, .c = NULL, }; |
274 | while (new2old_connection(&cq)) { |
275 | struct connection *p = cq.c; |
276 | |
277 | if (lsw_alias_cmp(alias, p->connalias)) |
278 | count += (*f)(p, arg, logger); |
279 | } |
280 | return count; |
281 | } |
282 | |
283 | /* |
284 | * return -1 if nothing was found at all; else total from f() |
285 | */ |
286 | |
287 | int foreach_concrete_connection_by_name(const char *name, |
288 | int (*f)(struct connection *c, |
289 | void *arg, struct logger *logger), |
290 | void *arg, struct logger *logger) |
291 | { |
292 | /* |
293 | * Find the first non-CK_INSTANCE connection matching NAME; |
294 | * that is CK_GROUP, CK_TEMPLATE, CK_PERMENANT, CK_GOING_AWAY. |
295 | * |
296 | * If this search succeeds, then the function also succeeds. |
297 | * |
298 | * But here's the kicker: |
299 | * |
300 | * The original conn_by_name() call also moved the connection |
301 | * to the front of the connections list. For CK_GROUP and |
302 | * CK_TEMPLATE this put any CK_INSTANCES after it in the list |
303 | * so continuing the search would find them (without this the |
304 | * list is new-to-old so instances would have been skipped). |
305 | * |
306 | * This code achieves the same effect by searching old2new. |
307 | */ |
308 | struct connection_query cq = { .where = HERE, .c = NULL, }; |
309 | bool found = false; |
310 | while (old2new_connection(&cq)) { |
311 | struct connection *c = cq.c; |
312 | if (c->kind == CK_INSTANCE) { |
313 | continue; |
314 | } |
315 | if (!streq(c->name, name)) { |
316 | continue; |
317 | } |
318 | found = true; |
319 | break; |
320 | } |
321 | if (!found) { |
322 | /* nothing matched at all */ |
323 | return -1; |
324 | } |
325 | /* |
326 | * Now continue with the connection list looking for |
327 | * CK_PERMENANT and/or CK_INSTANCE connections with the name. |
328 | */ |
329 | int total = 0; |
330 | do { |
331 | struct connection *c = cq.c; |
332 | if (c->kind >= CK_PERMANENT && |
333 | !NEVER_NEGOTIATE(c->policy) && |
334 | streq(c->name, name)) { |
335 | total += f(c, arg, logger); |
336 | } |
337 | } while (old2new_connection(&cq)); |
338 | return total; |
339 | } |
340 | |
341 | static int delete_connection_wrap(struct connection *c, void *arg UNUSED, struct logger *logger) |
342 | { |
343 | /* XXX: something better? */ |
344 | close_any(&c->logger->global_whackfd); |
345 | c->logger->global_whackfd = fd_dup(logger->global_whackfd, HERE); /* freed by discard_connection() */ |
346 | |
347 | delete_connection(&c, false); |
348 | return 1; |
349 | } |
350 | |
351 | /* Delete connections with the specified name */ |
352 | void delete_connections_by_name(const char *name, bool strict, struct logger *logger) |
353 | { |
354 | passert(name != NULL); |
355 | struct connection *c = conn_by_name(name, strict); |
356 | if (c != NULL) { |
357 | do { |
358 | /* XXX: something better? */ |
359 | delete_connection_wrap(c, NULL, logger); |
360 | c = conn_by_name(name, false/*!strict*/); |
361 | } while (c != NULL); |
362 | } else { |
363 | foreach_connection_by_alias(name, delete_connection_wrap, NULL, logger); |
364 | } |
365 | } |
366 | |
367 | void delete_every_connection(void) |
368 | { |
369 | struct connection_query cq = { .where = HERE, .c = NULL, }; |
370 | /* Delete instances before templates. */ |
371 | while (new2old_connection(&cq)) { |
372 | struct connection *c = cq.c; |
373 | delete_connection(&c, true); |
374 | } |
375 | } |
376 | |
377 | ip_port end_host_port(const struct end *end, const struct end *other) |
378 | { |
379 | unsigned port; |
380 | if (end->config->host.ikeport != 0) { |
381 | /* |
382 | * The END's IKEPORT was specified in the config file. |
383 | * Use that. |
384 | */ |
385 | port = end->config->host.ikeport; |
386 | } else if (other->config->host.ikeport != 0) { |
387 | /* |
388 | * The other end's IKEPORT was specified in the config |
389 | * file. Since specifying an IKEPORT implies ESP |
390 | * encapsulation (i.e. IKE packets must include the |
391 | * ESP=0 prefix), send packets from the encapsulating |
392 | * NAT_IKE_UDP_PORT. |
393 | */ |
394 | port = NAT_IKE_UDP_PORT; |
395 | } else if (other->host_encap) { |
396 | /* |
397 | * See above. Presumably an instance which previously |
398 | * had a natted port and is being revived. |
399 | */ |
400 | port = NAT_IKE_UDP_PORT; |
401 | } else { |
402 | port = IKE_UDP_PORT; |
403 | } |
404 | return ip_hport(port); |
405 | } |
406 | |
407 | void update_ends_from_this_host_addr(struct end *this, struct end *that) |
408 | { |
409 | const struct ip_info *afi = address_type(&this->host_addr); |
410 | if (afi == NULL) { |
411 | dbg("%s.host_addr's address family is unknown; skipping default_end()", |
412 | this->config->leftright); |
413 | return; |
414 | } |
415 | |
416 | if (address_is_any(this->host_addr)) { |
417 | dbg("%s.host_addr's is %%any; skipping default_end()", |
418 | this->config->leftright); |
419 | return; |
420 | } |
421 | |
422 | dbg("updating connection from %s.host_addr", this->config->leftright); |
423 | |
424 | /* Default ID to IP (but only if not NO_IP -- WildCard) */ |
425 | if (this->id.kind == ID_NONE && address_is_specified(this->host_addr)) { |
426 | this->id.kind = afi->id_ip_addr; |
427 | this->id.ip_addr = this->host_addr; |
428 | this->has_id_wildcards = false; |
429 | } |
430 | |
431 | /* propagate this HOST_ADDR to that. */ |
432 | if (address_is_unset(&that->host_nexthop) || |
433 | address_is_any(that->host_nexthop)) { |
434 | that->host_nexthop = this->host_addr; |
435 | address_buf ab; |
436 | dbg("%s host_nexthop %s", |
437 | that->config->leftright, str_address(&that->host_nexthop, &ab)); |
438 | } |
439 | |
440 | /* |
441 | * If THAT has an IKEPORT (which means messages are ESP=0 |
442 | * prefixed), then THIS must send from either IKEPORT or the |
443 | * NAT port (and also ESP=0 prefix messages). |
444 | */ |
445 | unsigned host_port = hport(end_host_port(this, that)); |
446 | dbg("%s() %s.host_port: %u->%u", __func__, this->config->leftright, |
447 | this->host_port, host_port); |
448 | this->host_port = host_port; |
449 | |
450 | /* Default client to subnet containing only self */ |
451 | if (!this->has_client) { |
452 | /* |
453 | * Default client to a subnet containing only self. |
454 | * |
455 | * For instance, the config file omitted subnet, but |
456 | * specified protoport; merge that. |
457 | */ |
458 | this->client = selector_from_address_protoport(this->host_addr, |
459 | this->config->client.protoport); |
460 | } |
461 | |
462 | if (this->sendcert == 0) { |
463 | /* uninitialized (ugly hack) */ |
464 | this->sendcert = CERT_SENDIFASKED; |
465 | } |
466 | } |
467 | |
468 | /* |
469 | * Format the topology of a connection end, leaving out defaults. |
470 | * Used to construct strings of the form: |
471 | * |
472 | * LOCAL_END ...END_REMOTE |
473 | * |
474 | * where END_REMOTE is roughly formatted as the mirror image of |
475 | * LOCAL_END. IS_LEFT (confusing name given connection left/right) |
476 | * determines if the LHS or RHS string is being emitted.. LOCAL_END's |
477 | * longest string is: |
478 | * |
479 | * client === host : port [ host_id ] --- HOP |
480 | * |
481 | * Note: if that == NULL, skip nexthop. Returns strlen of formatted |
482 | * result (length excludes NUL at end). |
483 | */ |
484 | |
485 | static void jam_end_host(struct jambuf *buf, const struct end *this, lset_t policy) |
486 | { |
487 | /* HOST */ |
488 | bool dohost_name; |
489 | bool dohost_port; |
490 | if (address_is_unset(&this->host_addr) || |
491 | address_is_any(this->host_addr)) { |
492 | dohost_port = false; |
493 | if (this->host_type == KH_IPHOSTNAME) { |
494 | dohost_name = true; |
495 | jam_string(buf, "%dns"); |
496 | } else { |
497 | dohost_name = false; |
498 | switch (policy & (POLICY_GROUP | POLICY_OPPORTUNISTIC)) { |
499 | case POLICY_GROUP: |
500 | jam_string(buf, "%group"); |
501 | break; |
502 | case POLICY_OPPORTUNISTIC: |
503 | jam_string(buf, "%opportunistic"); |
504 | break; |
505 | case POLICY_GROUP | POLICY_OPPORTUNISTIC: |
506 | jam_string(buf, "%opportunisticgroup"); |
507 | break; |
508 | default: |
509 | jam_string(buf, "%any"); |
510 | break; |
511 | } |
512 | } |
513 | } else if (is_virtual_end(this)) { |
514 | dohost_name = false; |
515 | dohost_port = false; |
516 | jam_string(buf, "%virtual"); |
517 | } else { |
518 | dohost_name = true; |
519 | dohost_port = true; |
520 | jam_address_sensitive(buf, &this->host_addr); |
521 | } |
522 | |
523 | /* <NAME> */ |
524 | if (dohost_name && this->host_addr_name != NULL) { |
525 | jam(buf, "<%s>", this->host_addr_name); |
526 | } |
527 | |
528 | /* |
529 | * XXX: only print anomalies: when the host address is |
530 | * "non-zero", a non-IKE_UDP_PORT; and when zero, any non-zero |
531 | * port. |
532 | */ |
533 | if (dohost_port ? (this->config->host.ikeport != 0 || this->host_port != IKE_UDP_PORT) : |
534 | this->host_port != 0) { |
535 | /* |
536 | * XXX: Part of the problem is that code is stomping |
537 | * on the HOST_ADDR's port setting it to the CLIENT's |
538 | * port. |
539 | * |
540 | * XXX: Format this as ADDRESS:PORT<name> not |
541 | * ADDRESS<name>:PORT? Or always emit the PORT? |
542 | */ |
543 | jam(buf, ":%u", this->host_port); |
544 | } |
545 | } |
546 | |
547 | static void jam_end_client(struct jambuf *buf, const struct end *this, |
548 | lset_t policy, bool is_left) |
549 | { |
550 | /* [CLIENT===] or [===CLIENT] */ |
551 | if (!this->has_client) { |
552 | return; |
553 | } |
554 | if (selector_is_unset(&this->client)) { |
555 | return; |
556 | } |
557 | bool boring = (selector_is_all(this->client) && |
558 | (policy & (POLICY_GROUP | POLICY_OPPORTUNISTIC))); |
559 | |
560 | if (!boring && !is_left) { |
561 | jam_string(buf, "==="); |
562 | } |
563 | |
564 | if (boring) { |
565 | /* boring case */ |
566 | } else if (is_virtual_end(this)) { |
567 | if (is_virtual_vhost(this)) |
568 | jam_string(buf, "vhost:?"); |
569 | else |
570 | jam_string(buf, "vnet:?"); |
571 | } else if (selector_is_zero(this->client)) { |
572 | jam_string(buf, "?"); |
573 | } else { |
574 | jam_selector_subnet(buf, &this->client); |
575 | } |
576 | |
577 | if (!boring && is_left) { |
578 | jam_string(buf, "==="); |
579 | } |
580 | } |
581 | |
582 | static void jam_end_protoport(struct jambuf *buf, const struct end *this) |
583 | { |
584 | /* payload protocol and port */ |
585 | if (this->has_port_wildcard) { |
586 | jam(buf, ":%u/%%any", this->protocol); |
587 | } else if (this->port || this->protocol) { |
588 | jam(buf, ":%u/%u", this->protocol, this->port); |
589 | } |
590 | } |
591 | |
592 | static void jam_end_id(struct jambuf *buf, const struct end *this) |
593 | { |
594 | /* id, if different from host */ |
595 | bool open_paren = false; |
596 | if (!(this->id.kind == ID_NONE || |
597 | (id_is_ipaddr(&this->id) && |
598 | sameaddr(&this->id.ip_addr, &this->host_addr)))) { |
599 | open_paren = true; |
600 | jam_string(buf, "["); |
601 | jam_id_bytes(buf, &this->id, jam_sanitized_bytes); |
602 | } |
603 | |
604 | if (this->modecfg_server || this->modecfg_client || |
605 | this->xauth_server || this->xauth_client || |
606 | this->sendcert != cert_defaultcertpolicy) { |
607 | |
608 | if (open_paren) { |
609 | jam_string(buf, ","); |
610 | } else { |
611 | open_paren = true; |
612 | jam_string(buf, "["); |
613 | } |
614 | |
615 | if (this->modecfg_server) |
616 | jam_string(buf, "MS"); |
617 | if (this->modecfg_client) |
618 | jam_string(buf, "+MC"); |
619 | if (this->cat) |
620 | jam_string(buf, "+CAT"); |
621 | if (this->xauth_server) |
622 | jam_string(buf, "+XS"); |
623 | if (this->xauth_client) |
624 | jam_string(buf, "+XC"); |
625 | |
626 | switch (this->sendcert) { |
627 | case CERT_NEVERSEND: |
628 | jam(buf, "+S-C"); |
629 | break; |
630 | case CERT_SENDIFASKED: |
631 | jam(buf, "+S?C"); |
632 | break; |
633 | case CERT_ALWAYSSEND: |
634 | jam(buf, "+S=C"); |
635 | break; |
636 | default: |
637 | jam(buf, "+UNKNOWN"); |
638 | } |
639 | } |
640 | |
641 | if (open_paren) { |
642 | jam_string(buf, "]"); |
643 | } |
644 | } |
645 | |
646 | static void jam_end_nexthop(struct jambuf *buf, const struct end *this, |
647 | const struct end *that, bool filter_rnh, bool is_left) |
648 | { |
649 | /* [---hop] */ |
650 | if (that != NULL && |
651 | !filter_rnh && |
652 | !sameaddr(&this->host_nexthop, &that->host_addr)) { |
653 | if (is_left) { |
654 | jam_string(buf, "---"); |
655 | } |
656 | jam_address(buf, &this->host_nexthop); |
657 | if (!is_left) { |
658 | jam_string(buf, "---"); |
659 | } |
660 | } |
661 | } |
662 | |
663 | void jam_end(struct jambuf *buf, const struct end *this, const struct end *that, |
664 | bool is_left, lset_t policy, bool filter_rnh) |
665 | { |
666 | if (is_left) { |
667 | /* CLIENT=== */ |
668 | jam_end_client(buf, this, policy, is_left); |
669 | /* HOST */ |
670 | jam_end_host(buf, this, policy); |
671 | /* [ID+OPTS] */ |
672 | jam_end_id(buf, this); |
673 | /* /PROTOCOL:PORT */ |
674 | jam_end_protoport(buf, this); |
675 | /* ---NEXTHOP */ |
676 | jam_end_nexthop(buf, this, that, filter_rnh, is_left); |
677 | } else { |
678 | /* HOPNEXT--- */ |
679 | jam_end_nexthop(buf, this, that, filter_rnh, is_left); |
680 | /* HOST */ |
681 | jam_end_host(buf, this, policy); |
682 | /* [ID+OPTS] */ |
683 | jam_end_id(buf, this); |
684 | /* /PROTOCOL:PORT */ |
685 | jam_end_protoport(buf, this); |
686 | /* ===CLIENT */ |
687 | jam_end_client(buf, this, policy, is_left); |
688 | } |
689 | } |
690 | |
691 | /* |
692 | * format topology of a connection. |
693 | * Two symmetric ends separated by ... |
694 | */ |
695 | |
696 | #define END_BUF (sizeof(subnet_buf) + sizeof(address_buf) + sizeof(id_buf) + sizeof(subnet_buf) + 10) |
697 | #define CONN_BUF_LEN (2 * (END_BUF - 1) + 4) |
698 | |
699 | static char *format_connection(char *buf, size_t buf_len, |
700 | const struct connection *c, |
701 | const struct spd_route *sr) |
702 | { |
703 | struct jambuf b = array_as_jambuf(buf, buf_len); |
704 | jam_end(&b, &sr->this, &sr->that, /*left?*/true, LEMPTY, false); |
705 | jam(&b, "..."); |
706 | jam_end(&b, &sr->that, &sr->this, /*left?*/false, c->policy, oriented(c)); |
707 | return buf; |
708 | } |
709 | |
710 | /* spd_route's with end's get copied in xauth.c */ |
711 | void unshare_connection_end(struct end *e) |
712 | { |
713 | e->id = clone_id(&e->id, "unshare connection id"); |
714 | |
715 | if (e->cert.nss_cert != NULL) { |
716 | e->cert.nss_cert = CERT_DupCertificate(e->cert.nss_cert); |
717 | passert(e->cert.nss_cert != NULL); |
718 | } |
719 | |
720 | e->ca = clone_hunk(e->ca, "ca string"); |
721 | e->updown = clone_str(e->updown, "updown"); |
722 | e->xauth_username = clone_str(e->xauth_username, "xauth username"); |
723 | e->xauth_password = clone_str(e->xauth_password, "xauth password"); |
724 | e->host_addr_name = clone_str(e->host_addr_name, "host ip"); |
725 | e->virt = virtual_ip_addref(e->virt, HERE); |
726 | if (e->ckaid != NULL) { |
727 | e->ckaid = clone_thing(*e->ckaid, "ckaid"); |
728 | } |
729 | pexpect(e->sec_label.ptr == NULL); |
730 | } |
731 | |
732 | /* |
733 | * unshare_connection: after a struct connection has been copied, |
734 | * duplicate anything it references so that unshareable resources |
735 | * are no longer shared. Typically strings, but some other things too. |
736 | * |
737 | * Think of this as converting a shallow copy to a deep copy |
738 | * |
739 | * XXX: unshare_connection() and the shallow clone should be merged |
740 | * into a routine that allocates a new connection and then explicitly |
741 | * copy over the data. Cloning pointers and then trying to fix them |
742 | * up after the event is a guaranteed way to create use-after-free |
743 | * problems. |
744 | */ |
745 | static void unshare_connection(struct connection *c) |
746 | { |
747 | c->root_config = NULL; |
748 | |
749 | c->foodgroup = clone_str(c->foodgroup, "connection foodgroup"); |
750 | |
751 | c->modecfg_dns = clone_str(c->modecfg_dns, |
752 | "connection modecfg_dns"); |
753 | c->modecfg_domains = clone_str(c->modecfg_domains, |
754 | "connection modecfg_domains"); |
755 | c->modecfg_banner = clone_str(c->modecfg_banner, |
756 | "connection modecfg_banner"); |
757 | c->dnshostname = clone_str(c->dnshostname, "connection dnshostname"); |
758 | |
759 | /* duplicate any alias, adding spaces to the beginning and end */ |
760 | c->connalias = clone_str(c->connalias, "connection alias"); |
761 | |
762 | c->vti_iface = clone_str(c->vti_iface, "connection vti_iface"); |
763 | |
764 | c->redirect_to = clone_str(c->redirect_to, |
765 | "connection redirect_to"); |
766 | c->accept_redirect_to = clone_str(c->accept_redirect_to, |
767 | "connection accept_redirect_to"); |
768 | |
769 | for (struct spd_route *sr = &c->spd; sr != NULL; sr = sr->spd_next) { |
770 | unshare_connection_end(&sr->this); |
771 | unshare_connection_end(&sr->that); |
772 | } |
773 | |
774 | /* increment references to algo's, if any */ |
775 | proposals_addref(&c->ike_proposals.p); |
776 | proposals_addref(&c->child_proposals.p); |
777 | c->v2_ike_proposals = NULL; /* don't share IKE proposals */ |
778 | |
779 | if (c->pool != NULL) |
780 | reference_addresspool(c); |
781 | |
782 | if (IS_XFRMI && c->xfrmi != NULL) |
783 | reference_xfrmi(c); |
783 | reference_xfrmi(c); |
784 | } |
785 | |
786 | static int extract_end(struct connection *c, |
787 | struct end *dst, |
788 | struct config_end *config_end, |
789 | const struct whack_end *src, |
790 | struct end *other_end, |
791 | struct logger *logger/*connection "..."*/) |
792 | { |
793 | passert(dst->config == config_end); |
794 | const char *leftright = dst->config->leftright; |
795 | bool same_ca = 0; |
796 | |
797 | /* |
798 | * decode id, if any |
799 | * |
800 | * For %fromcert, the load_end_cert*() call will update it. |
801 | */ |
802 | if (src->id == NULL) { |
803 | dst->id.kind = ID_NONE; |
804 | } else { |
805 | /* |
806 | * Cannot report errors due to low level nesting of functions, |
807 | * since it will try literal IP string conversions first. But |
808 | * atoid() will log real failures like illegal DNS chars already, |
809 | * and for @string ID's all chars are valid without processing. |
810 | */ |
811 | atoid(src->id, &dst->id); |
812 | } |
813 | |
814 | /* decode CA distinguished name, if any */ |
815 | dst->ca = EMPTY_CHUNK; |
816 | if (src->ca != NULL) { |
817 | if (streq(src->ca, "%same")) { |
818 | same_ca = 1; |
819 | } else if (!streq(src->ca, "%any")) { |
820 | err_t ugh; |
821 | |
822 | /* convert the CA into a DN blob */ |
823 | free_chunk_content(&dst->ca); |
824 | ugh = atodn(src->ca, &dst->ca); |
825 | if (ugh != NULL) { |
826 | llog(RC_LOG, logger, |
827 | "bad %s CA string '%s': %s (ignored)", |
828 | leftright, src->ca, ugh); |
829 | } else { |
830 | /* now try converting it back; isn't failing this a bug? */ |
831 | ugh = parse_dn(dst->ca); |
832 | if (ugh != NULL) { |
833 | llog(RC_LOG, logger, |
834 | "error parsing %s CA converted to DN: %s", |
835 | leftright, ugh); |
836 | DBG_dump_hunk(NULL, dst->ca); |
837 | } |
838 | } |
839 | |
840 | } |
841 | } |
842 | |
843 | /* |
844 | * Try to find the cert / private key. |
845 | * |
846 | * XXX: Be lazy and simply warn about combinations such as |
847 | * cert+ckaid. |
848 | * |
849 | * Should this instead cross check? |
850 | */ |
851 | if (src->cert != NULL) { |
852 | if (src->ckaid != NULL) { |
853 | llog(RC_LOG, logger, |
854 | "warning: ignoring %s ckaid '%s' and using %s certificate '%s'", |
855 | leftright, src->cert, |
856 | leftright, src->cert); |
857 | } |
858 | if (src->rsasigkey != NULL) { |
859 | llog(RC_LOG, logger, |
860 | "warning: ignoring %s rsasigkey '%s' and using %s certificate '%s'", |
861 | leftright, src->cert, |
862 | leftright, src->cert); |
863 | } |
864 | CERTCertificate *cert = get_cert_by_nickname_from_nss(src->cert, logger); |
865 | if (cert == NULL) { |
866 | llog(RC_FATAL, logger, |
867 | "failed to add connection: %s certificate '%s' not found in the NSS database", |
868 | leftright, src->cert); |
869 | return -1; /* fatal */ |
870 | } |
871 | diag_t diag = add_end_cert_and_preload_private_key(cert, dst, |
872 | same_ca/*preserve_ca*/, |
873 | logger); |
874 | if (diag != NULL) { |
875 | llog_diag(RC_FATAL, logger, &diag, "failed to add connection: "); |
876 | CERT_DestroyCertificate(cert); |
877 | return -1; |
878 | } |
879 | } else if (src->rsasigkey != NULL) { |
880 | if (src->ckaid != NULL) { |
881 | llog(RC_LOG, logger, |
882 | "warning: ignoring %s ckaid '%s' and using %s rsasigkey", |
883 | leftright, src->ckaid, leftright); |
884 | } |
885 | /* |
886 | * XXX: hack: whack will load the rsasigkey in a |
887 | * second message, this code just extracts the ckaid. |
888 | */ |
889 | const struct pubkey_type *type = &pubkey_type_rsa; |
890 | /* XXX: lifted from starter_whack_add_pubkey() */ |
891 | char err_buf[TTODATAV_BUF]; |
892 | char keyspace[1024 + 4]; |
893 | size_t keylen; |
894 | |
895 | /* ??? this value of err isn't used */ |
896 | err_t err = ttodatav(src->rsasigkey, 0, 0, |
Value stored to 'err' during its initialization is never read | |
897 | keyspace, sizeof(keyspace), &keylen, |
898 | err_buf, sizeof(err_buf), 0); |
899 | union pubkey_content pkc; |
900 | keyid_t pubkey; |
901 | ckaid_t ckaid; |
902 | size_t size; |
903 | err = type->unpack_pubkey_content(&pkc, &pubkey, &ckaid, &size, |
904 | chunk2(keyspace, keylen)); |
905 | if (err != NULL) { |
906 | llog(RC_FATAL, logger, |
907 | "failed to add connection: %s raw public key invalid: %s", |
908 | leftright, err); |
909 | return -1; |
910 | } |
911 | ckaid_buf ckb; |
912 | dbg("saving %s CKAID %s extracted from raw %s public key", |
913 | leftright, str_ckaid(&ckaid, &ckb), type->name); |
914 | dst->ckaid = clone_const_thing(ckaid, "raw pubkey's ckaid"); |
915 | type->free_pubkey_content(&pkc); |
916 | /* try to pre-load the private key */ |
917 | bool load_needed; |
918 | err = preload_private_key_by_ckaid(&ckaid, &load_needed, logger); |
919 | if (err != NULL) { |
920 | ckaid_buf ckb; |
921 | dbg("no private key matching %s CKAID %s: %s", |
922 | leftright, str_ckaid(dst->ckaid, &ckb), err); |
923 | } else if (load_needed) { |
924 | ckaid_buf ckb; |
925 | llog(RC_LOG|LOG_STREAM/*not-whack-for-now*/, logger, |
926 | "loaded private key matching %s CKAID %s", |
927 | leftright, str_ckaid(dst->ckaid, &ckb)); |
928 | } |
929 | } else if (src->ckaid != NULL) { |
930 | ckaid_t ckaid; |
931 | err_t err = string_to_ckaid(src->ckaid, &ckaid); |
932 | if (err != NULL) { |
933 | /* should have been rejected by whack? */ |
934 | /* XXX: don't trust whack */ |
935 | llog(RC_FATAL, logger, |
936 | "failed to add connection: %s CKAID '%s' invalid: %s", |
937 | leftright, src->ckaid, err); |
938 | return -1; /* fatal */ |
939 | } |
940 | /* |
941 | * Always save the CKAID so lazy load of the private |
942 | * key will work. |
943 | */ |
944 | dst->ckaid = clone_thing(ckaid, "end ckaid"); |
945 | /* |
946 | * See if there's a certificate matching the CKAID, if |
947 | * not assume things will later find the private key. |
948 | */ |
949 | CERTCertificate *cert = get_cert_by_ckaid_from_nss(&ckaid, logger); |
950 | if (cert != NULL) { |
951 | diag_t diag = add_end_cert_and_preload_private_key(cert, dst, |
952 | same_ca/*preserve_ca*/, |
953 | logger); |
954 | if (diag != NULL) { |
955 | llog_diag(RC_FATAL, logger, &diag, "failed to add connection: "); |
956 | CERT_DestroyCertificate(cert); |
957 | return -1; |
958 | } |
959 | } else { |
960 | dbg("%s CKAID '%s' did not match a certificate in the NSS database", |
961 | leftright, src->ckaid); |
962 | /* try to pre-load the private key */ |
963 | bool load_needed; |
964 | err_t err = preload_private_key_by_ckaid(&ckaid, &load_needed, logger); |
965 | if (err != NULL) { |
966 | ckaid_buf ckb; |
967 | dbg("no private key matching %s CKAID %s: %s", |
968 | leftright, str_ckaid(dst->ckaid, &ckb), err); |
969 | } else { |
970 | ckaid_buf ckb; |
971 | llog(RC_LOG|LOG_STREAM/*not-whack-for-now*/, logger, |
972 | "loaded private key matching %s CKAID %s", |
973 | leftright, str_ckaid(dst->ckaid, &ckb)); |
974 | } |
975 | } |
976 | } |
977 | |
978 | /* does id have wildcards? */ |
979 | dst->has_id_wildcards = id_count_wildcards(&dst->id) > 0; |
980 | |
981 | /* the rest is simple copying of corresponding fields */ |
982 | dst->host_type = src->host_type; |
983 | dst->host_addr = src->host_addr; |
984 | dst->host_addr_name = clone_str(src->host_addr_name, "host ip"); |
985 | dst->host_nexthop = src->host_nexthop; |
986 | dst->host_srcip = src->host_srcip; |
987 | dst->host_vtiip = src->host_vtiip; |
988 | dst->ifaceip = src->ifaceip; |
989 | dst->cat = src->cat; |
990 | dst->pool_range = src->pool_range; |
991 | |
992 | dst->xauth_server = src->xauth_server; |
993 | dst->xauth_client = src->xauth_client; |
994 | dst->xauth_username = clone_str(src->xauth_username, "xauth username"); |
995 | |
996 | dst->authby = src->authby; |
997 | |
998 | /* save some defaults */ |
999 | config_end->client.subnet = src->client; |
1000 | config_end->client.protoport = src->protoport; |
1001 | |
1002 | /* |
1003 | * .has_client means that .client contains a hardwired value, |
1004 | * if it doesn't then it is filled in later (for instance by |
1005 | * instantiate() calling default_end() after host_addr is |
1006 | * known). |
1007 | */ |
1008 | dst->has_client = src->has_client; |
1009 | if (src->has_client) { |
1010 | if (subnet_is_unset(&src->client)) { |
1011 | llog(RC_BADID, logger, "subnet error - failing to load connection"); |
1012 | return -1; |
1013 | } |
1014 | dst->client = selector_from_subnet_protoport(src->client, |
1015 | src->protoport); |
1016 | } |
1017 | |
1018 | dst->protocol = src->protoport.ipproto; |
1019 | dst->port = src->protoport.hport; |
1020 | dst->has_port_wildcard = protoport_has_any_port(&src->protoport); |
1021 | dst->key_from_DNS_on_demand = src->key_from_DNS_on_demand; |
1022 | dst->updown = clone_str(src->updown, "updown"); |
1023 | dst->sendcert = src->sendcert; |
1024 | |
1025 | config_end->host.ikeport = src->host_ikeport; |
1026 | if (src->host_ikeport > 65535) { |
1027 | llog(RC_BADID, logger, |
1028 | "%sikeport=%u must be between 1..65535, ignored", |
1029 | leftright, src->host_ikeport); |
1030 | config_end->host.ikeport = 0; |
1031 | } |
1032 | |
1033 | /* |
1034 | * see if we can resolve the DNS name right now |
1035 | * XXX this is WRONG, we should do this asynchronously, as part of |
1036 | * the normal loading process |
1037 | */ |
1038 | switch (dst->host_type) { |
1039 | case KH_IPHOSTNAME: |
1040 | { |
1041 | err_t er = ttoaddress_dns(shunk1(dst->host_addr_name), |
1042 | address_type(&dst->host_addr), |
1043 | &dst->host_addr); |
1044 | if (er != NULL) { |
1045 | llog(RC_COMMENT, logger, |
1046 | "failed to convert '%s' at load time: %s", |
1047 | dst->host_addr_name, er); |
1048 | } |
1049 | break; |
1050 | } |
1051 | |
1052 | default: |
1053 | break; |
1054 | } |
1055 | |
1056 | /* |
1057 | * How to add addresspool only for responder? It is not |
1058 | * necessary on the initiator |
1059 | * |
1060 | * Note that, possibly confusingly, it is the client's end |
1061 | * that has the address pool. I.e., set OTHER_END to server. |
1062 | * |
1063 | * Need to also merge in the client/server options provided by |
1064 | * whack - sometimes they are set, sometimes they are not. |
1065 | */ |
1066 | |
1067 | dst->modecfg_server = dst->modecfg_server || src->modecfg_server; |
1068 | dst->modecfg_client = dst->modecfg_client || src->modecfg_client; |
1069 | |
1070 | if (range_size(src->pool_range) > 0) { |
1071 | if (c->pool != NULL) { |
1072 | llog(RC_LOG_SERIOUS, logger, "both left and right define address pools"); |
1073 | return -1; |
1074 | } |
1075 | diag_t d = install_addresspool(src->pool_range, &c->pool); |
1076 | if (d != NULL) { |
1077 | llog_diag(RC_LOG_SERIOUS, c->logger, &d, |
1078 | "invalid %saddresspool: ", leftright); |
1079 | return -1; |
1080 | } |
1081 | other_end->modecfg_server = true; |
1082 | dst->modecfg_client = true; |
1083 | } |
1084 | |
1085 | return same_ca; |
1086 | } |
1087 | |
1088 | static diag_t check_connection_end(const struct whack_end *this, |
1089 | const struct whack_end *that, |
1090 | const struct whack_message *wm) |
1091 | { |
1092 | /* |
1093 | * This should have been diagnosed by whack, |
1094 | * so we need not be clear. |
1095 | * |
1096 | * XXX: don't trust whack. |
1097 | * XXX: don't assume values were set (defaulted). |
1098 | * XXX: don't assume unset's type is NULL. |
1099 | * XXX: because both directions are tested some checks are redundant. |
1100 | */ |
1101 | |
1102 | /* |
1103 | * Find a type for the host addresses. Order search by what |
1104 | * was most liklely specified. |
1105 | */ |
1106 | const struct ip_info *type = (!address_is_unset(&this->host_addr) ? address_type(&this->host_addr) : |
1107 | !address_is_unset(&this->host_nexthop) ? address_type(&this->host_nexthop) : |
1108 | NULL); |
1109 | |
1110 | if (type != NULL) { |
1111 | if (!address_is_unset(&this->host_nexthop) && |
1112 | address_type(&this->host_nexthop) != type) { |
1113 | return diag("host address family inconsistent: expecting %s but %snexthop is %s", |
1114 | type->ip_name, this->leftright, address_type(&this->host_nexthop)->ip_name); |
1115 | } |
1116 | if (!address_is_unset(&that->host_addr) && |
1117 | address_type(&that->host_addr) != type) { |
1118 | return diag("host address family inconsistent: expecting %s but %shost is %s", |
1119 | type->ip_name, that->leftright, address_type(&that->host_addr)->ip_name); |
1120 | } |
1121 | if (!address_is_unset(&that->host_nexthop) && |
1122 | address_type(&that->host_nexthop) != type) { |
1123 | return diag("host address family inconsistent: expecting %s but %snexthop is %s", |
1124 | type->ip_name, that->leftright, address_type(&that->host_nexthop)->ip_name); |
1125 | } |
1126 | } |
1127 | |
1128 | /* ??? seems like a nasty test (in-band, low-level) */ |
1129 | /* XXX: still nasty; just less low-level */ |
1130 | if (range_size(this->pool_range) > 0) { |
1131 | struct ip_pool *pool; /* ignore */ |
1132 | diag_t d = find_addresspool(this->pool_range, &pool); |
1133 | if (d != NULL) { |
1134 | return d; |
1135 | } |
1136 | } |
1137 | |
1138 | const struct ip_info *this_afi = subnet_type(&this->client); |
1139 | const struct ip_info *that_afi = subnet_type(&that->client); |
1140 | if (this_afi != NULL && that_afi != NULL && this_afi != that_afi) { |
1141 | /* IPv4 vs IPv6? */ |
1142 | return diag("subnets must have the same address family"); |
1143 | } |
1144 | |
1145 | /* MAKE this more sane in the face of unresolved IP addresses */ |
1146 | if (that->host_type != KH_IPHOSTNAME && |
1147 | (address_is_unset(&that->host_addr) || address_is_any(that->host_addr))) { |
1148 | /* |
1149 | * Other side is wildcard: we must check if other conditions |
1150 | * met. |
1151 | */ |
1152 | if (this->host_type != KH_IPHOSTNAME && |
1153 | (address_is_unset(&this->host_addr) || address_is_any(this->host_addr))) { |
1154 | return diag("connection %s must specify host IP address for our side", |
1155 | wm->name); |
1156 | } |
1157 | } |
1158 | |
1159 | if (this->protoport.ipproto == 0 && this->protoport.hport != 0) { |
1160 | return diag("connection %s cannot specify non-zero port %d for protocol 0", |
1161 | wm->name, this->protoport.hport); |
1162 | } |
1163 | |
1164 | if (this->id != NULL && streq(this->id, "%fromcert")) { |
1165 | lset_t auth_pol = (wm->policy & POLICY_ID_AUTH_MASK); |
1166 | |
1167 | if (this->authby == AUTHBY_PSK || this->authby == AUTHBY_NULL || |
1168 | auth_pol == POLICY_PSK || auth_pol == POLICY_AUTH_NULL) { |
1169 | return diag("ID cannot be specified as %%fromcert if PSK or AUTH-NULL is used"); |
1170 | } |
1171 | } |
1172 | |
1173 | return NULL; /* happy */ |
1174 | } |
1175 | |
1176 | diag_t add_end_cert_and_preload_private_key(CERTCertificate *cert, |
1177 | struct end *dst_end, |
1178 | bool_Bool preserve_ca, |
1179 | struct logger *logger) |
1180 | { |
1181 | passert(cert != NULL); |
1182 | dst_end->cert.nss_cert = NULL; |
1183 | const char *nickname = cert->nickname; |
1184 | const char *leftright = dst_end->config->leftright; |
1185 | |
1186 | /* |
1187 | * A copy of this code lives in nss_cert_verify.c :/ |
1188 | * Currently only a check for RSA is needed, as the only ECDSA |
1189 | key size not allowed in FIPS mode (p192 curve) is not implemented
1190 | * by NSS. |
1191 | * See also RSA_secret_sane() and ECDSA_secret_sane() |
1192 | */ |
1193 | if (libreswan_fipsmode()) { |
1194 | SECKEYPublicKey *pk = CERT_ExtractPublicKey(cert); |
1195 | passert(pk != NULL);
1196 | if (pk->keyType == rsaKey &&
1197 | ((pk->u.rsa.modulus.len * BITS_PER_BYTE) < FIPS_MIN_RSA_KEY_SIZE)) {
1198 | SECKEY_DestroyPublicKey(pk);
1199 | return diag("FIPS: rejecting %s certificate '%s' with key size %d which is under %d",
1200 | leftright, nickname,
1201 | pk->u.rsa.modulus.len * BITS_PER_BYTE,
1202 | FIPS_MIN_RSA_KEY_SIZE);
1203 | } |
1204 | /* TODO FORCE MINIMUM SIZE ECDSA KEY */ |
1205 | SECKEY_DestroyPublicKey(pk); |
1206 | } |
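     | /* note: the macro expansions shown in this report give BITS_PER_BYTE == 8 and
     |  * FIPS_MIN_RSA_KEY_SIZE == 2048, so the check above rejects RSA moduli shorter
     |  * than 2048 bits when running in FIPS mode. */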
1207 | |
1208 | /* XXX: should this be after validity check? */ |
1209 | select_nss_cert_id(cert, &dst_end->id); |
1210 | |
1211 | /* check validity of cert */ |
1212 | if (CERT_CheckCertValidTimes(cert, PR_Now(), false) !=
1213 | secCertTimeValid) { |
1214 | return diag("%s certificate '%s' is expired or not yet valid", |
1215 | leftright, nickname); |
1216 | } |
1217 | |
1218 | dbg("loading %s certificate \'%s\' pubkey", leftright, nickname){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("loading %s certificate \'%s\' pubkey", leftright , nickname); } }; |
1219 | if (!add_pubkey_from_nss_cert(&pluto_pubkeys, &dst_end->id, cert, logger)) { |
1220 | /* XXX: push diag_t into add_pubkey_from_nss_cert()? */ |
1221 | return diag("%s certificate \'%s\' pubkey could not be loaded", |
1222 | leftright, nickname); |
1223 | } |
1224 | |
1225 | dst_end->cert.nss_cert = cert; |
1226 | |
1227 | /* |
1228 | * If no CA is defined, use issuer as default; but only when |
1229 | * update is ok. |
1230 | * |
1231 | */ |
1232 | if (preserve_ca || dst_end->ca.ptr != NULL) {
1233 | dbg("preserving existing %s ca", leftright);
1234 | } else { |
1235 | dst_end->ca = clone_secitem_as_chunk(cert->derIssuer, "issuer ca"); |
1236 | } |
1237 | |
1238 | /* |
1239 | * Try to pre-load the certificate's secret (private key) into |
1240 | * the local cache (see keys.c). |
1241 | * |
1242 | * This can fail. For instance, this end may only have the |
1243 | * peer's certificate.
1244 | * |
1245 | * This could also fail because a needed secret is missing. |
1246 | * That case is handled by refine_host_connection / |
1247 | * get_psk. |
1248 | */ |
1249 | dbg("preload cert/secret for connection: %s", cert->nickname){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("preload cert/secret for connection: %s", cert ->nickname); } }; |
1250 | bool_Bool load_needed; |
1251 | err_t ugh = preload_private_key_by_cert(&dst_end->cert, &load_needed, logger); |
1252 | if (ugh != NULL((void*)0)) { |
1253 | dbg("no private key matching %s certificate %s: %s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("no private key matching %s certificate %s: %s" , leftright, nickname, ugh); } } |
1254 | leftright, nickname, ugh){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("no private key matching %s certificate %s: %s" , leftright, nickname, ugh); } }; |
1255 | } else if (load_needed) { |
1256 | llog(RC_LOG|LOG_STREAM/*not-whack-for-now*/, logger, |
1257 | "loaded private key matching %s certificate '%s'", |
1258 | leftright, nickname); |
1259 | } |
1260 | return NULL;
1261 | } |
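     | /*
     |  * A minimal sketch (not from the original source) of the call pattern the
     |  * diag_t return above implies, reusing the llog_diag() idiom seen later in
     |  * this file:
     |  *
     |  *   diag_t d = add_end_cert_and_preload_private_key(cert, dst_end,
     |  *                                                   preserve_ca, logger);
     |  *   if (d != NULL) {
     |  *       llog_diag(RC_FATAL, logger, &d, "failed to add connection: ");
     |  *       return false;
     |  *   }
     |  */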
1262 | |
1263 | /* only used by add_connection() */ |
1264 | static void mark_parse(/*const*/ char *wmmark, |
1265 | struct sa_mark *sa_mark, |
1266 | struct logger *logger/*connection "...":*/) |
1267 | { |
1268 | /*const*/ char *val_end; |
1269 | |
1270 | sa_mark->unique = false;
1271 | sa_mark->val = 0xffffffff;
1272 | sa_mark->mask = 0xffffffff;
1273 | if (streq(wmmark, "-1") || startswith(wmmark, "-1/")) {
1274 | sa_mark->unique = true;
1275 | val_end = wmmark + strlen("-1");
1276 | } else {
1277 | errno = 0;
1278 | unsigned long v = strtoul(wmmark, &val_end, 0);
1279 | if (errno != 0 || v > 0xffffffff ||
1280 | (*val_end != '\0' && *val_end != '/'))
1281 | { |
1282 | /* ??? should be detected and reported by confread and whack */ |
1283 | /* XXX: don't trust whack */ |
1284 | llog(RC_LOG_SERIOUS, logger, |
1285 | "bad mark value \"%s\"", wmmark); |
1286 | } else { |
1287 | sa_mark->val = v; |
1288 | } |
1289 | } |
1290 | |
1291 | if (*val_end == '/') { |
1292 | /*const*/ char *mask_end; |
1293 | errno = 0;
1294 | unsigned long v = strtoul(val_end+1, &mask_end, 0);
1295 | if (errno != 0 || v > 0xffffffff || *mask_end != '\0') {
1296 | /* ??? should be detected and reported by confread and whack */ |
1297 | /* XXX: don't trust whack */ |
1298 | llog(RC_LOG_SERIOUS, logger, |
1299 | "bad mark mask \"%s\"", mask_end); |
1300 | } else { |
1301 | sa_mark->mask = v; |
1302 | } |
1303 | } |
1304 | if ((sa_mark->val & ~sa_mark->mask) != 0) { |
1305 | /* ??? should be detected and reported by confread and whack */ |
1306 | /* XXX: don't trust whack */ |
1307 | llog(RC_LOG_SERIOUS, logger, |
1308 | "mark value %#08" PRIx32"x" " has bits outside mask %#08" PRIx32"x", |
1309 | sa_mark->val, sa_mark->mask); |
1310 | } |
1311 | } |
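     | /* note: mark_parse() never fails hard - on a malformed value or mask it only
     |  * logs via RC_LOG_SERIOUS and leaves the 0xffffffff defaults set above in place. */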
1312 | |
1313 | /* |
1314 | * Extract the connection detail from the whack message WM and store |
1315 | * them in the connection C. |
1316 | * |
1317 | * This code is responsible for cloning strings and other structures |
1318 | * so that they outlive the whack message. When things go wrong,
1319 | * return false, the caller will then use delete_connection() to free |
1320 | * the partially constructed connection. |
1321 | * |
1322 | * Checks from confread/whack should be moved here so it is similar |
1323 | * for all methods of loading a connection. |
1324 | * |
1325 | * XXX: at one point this code was populating the connection with |
1326 | * pointers to the whack message's strings and then trying to use
1327 | * unshare_connection() to create local copies. Bad idea. For |
1328 | * instance, it duplicated the proposal pointers yet here the pointer |
1329 | * was freshly allocated so no duplication should be needed (or at
1330 | * least shouldn't be) (look for strange free() vs delref() sequence). |
1331 | */ |
1332 | static bool extract_connection(const struct whack_message *wm,
1333 | struct connection *c) |
1334 | { |
1335 | diag_t d; |
1336 | |
1337 | struct config *config = alloc_thing(struct config, "root config");
1338 | c->root_config = config; /* writeable; root only */
1339 | c->config = config; /* read only; shared */
1340 |
1341 | passert(c->name != NULL); /* see alloc_connection() */
1342 | |
1343 | if ((wm->policy & POLICY_COMPRESS) && !can_do_IPcomp) {
1344 | llog(RC_FATAL, c->logger,
1345 | "failed to add connection with compress because kernel is not configured to do IPCOMP");
1346 | return false;
1347 | }
1348 |
1349 | if ((wm->policy & POLICY_TUNNEL) == LEMPTY) {
1350 | if (wm->sa_tfcpad != 0) {
1351 | llog(RC_FATAL, c->logger,
1352 | "failed to add connection: connection with type=transport cannot specify tfc=");
1353 | return false;
1354 | }
1355 | if (wm->vti_iface != NULL) {
1356 | llog(RC_FATAL, c->logger,
1357 | "failed to add connection: VTI requires tunnel mode but connection specifies type=transport");
1358 | return false;
1359 | }
1360 | }
1361 | if (LIN(POLICY_AUTHENTICATE, wm->policy)) {
1362 | if (wm->sa_tfcpad != 0) { |
1363 | llog(RC_FATAL, c->logger, |
1364 | "failed to add connection: connection with phase2=ah cannot specify tfc="); |
1365 | return false;
1366 | } |
1367 | } |
1368 | |
1369 | if (LIN(POLICY_AUTH_NEVER, wm->policy)) {
1370 | if ((wm->policy & POLICY_SHUNT_MASK) == POLICY_SHUNT_TRAP) {
1371 | llog(RC_FATAL, c->logger,
1372 | "failed to add connection: connection with authby=never must specify shunt type via type=");
1373 | return false;
1374 | }
1375 | }
1376 | if ((wm->policy & POLICY_SHUNT_MASK) != POLICY_SHUNT_TRAP) {
1377 | if ((wm->policy & (POLICY_ID_AUTH_MASK & ~POLICY_AUTH_NEVER)) != LEMPTY) {
1378 | llog(RC_FATAL, c->logger,
1379 | "failed to add connection: shunt connection cannot have authentication method other than authby=never");
1380 | return false;
1381 | } |
1382 | } else { |
1383 | switch (wm->policy & (POLICY_AUTHENTICATE | POLICY_ENCRYPT)) {
1384 | case LEMPTY:
1385 | if (!LIN(POLICY_AUTH_NEVER, wm->policy)) {
1386 | llog(RC_FATAL, c->logger,
1387 | "failed to add connection: non-shunt connection must have AH or ESP");
1388 | return false;
1389 | }
1390 | break;
1391 | case POLICY_AUTHENTICATE | POLICY_ENCRYPT:
1392 | llog(RC_FATAL, c->logger,
1393 | "failed to add connection: non-shunt connection must not specify both AH and ESP");
1394 | return false;
1395 | } |
1396 | } |
1397 | |
1398 | if (wm->ike_version == IKEv1) { |
1399 | #ifdef USE_IKEv1
1400 | if (pluto_ikev1_pol != GLOBAL_IKEv1_ACCEPT) {
1401 | llog(RC_FATAL, c->logger,
1402 | "failed to add IKEv1 connection: global ikev1-policy does not allow IKEv1 connections");
1403 | return false;
1404 | }
1405 | #else
1406 | llog(RC_FATAL, c->logger, "failed to add IKEv1 connection: IKEv1 support not compiled in");
1407 | return false;
1408 | #endif |
1409 | } |
1410 | c->ike_version = wm->ike_version; |
1411 | |
1412 | if (wm->policy & POLICY_OPPORTUNISTIC &&
1413 | c->ike_version == IKEv1) {
1414 | llog(RC_FATAL, c->logger,
1415 | "failed to add connection: opportunistic connection MUST have IKEv2");
1416 | return false;
1417 | }
1418 |
1419 | if (wm->policy & POLICY_MOBIKE &&
1420 | c->ike_version == IKEv1) {
1421 | llog(RC_FATAL, c->logger,
1422 | "failed to add connection: MOBIKE requires IKEv2");
1423 | return false;
1424 | }
1425 |
1426 | if (wm->policy & POLICY_IKEV2_ALLOW_NARROWING &&
1427 | c->ike_version == IKEv1) {
1428 | llog(RC_FATAL, c->logger,
1429 | "failed to add connection: narrowing=yes requires IKEv2");
1430 | return false;
1431 | } |
1432 | |
1433 | if (wm->iketcp != IKE_TCP_NO && |
1434 | c->ike_version != IKEv2) { |
1435 | llog(RC_FATAL, c->logger, |
1436 | "failed to add connection: enable-tcp= requires IKEv2"); |
1437 | return false;
1438 | } |
1439 | |
1440 | if (wm->policy & POLICY_MOBIKE) {
1441 | if (kernel_ops->migrate_sa_check == NULL) {
1442 | llog(RC_FATAL, c->logger,
1443 | "failed to add connection: MOBIKE not supported by %s interface",
1444 | kernel_ops->kern_name);
1445 | return false;
1446 | }
1447 | /* probe the interface */
1448 | err_t err = kernel_ops->migrate_sa_check(c->logger);
1449 | if (err != NULL) {
1450 | llog(RC_FATAL, c->logger,
1451 | "failed to add connection: MOBIKE kernel support missing for %s interface: %s",
1452 | kernel_ops->kern_name, err);
1453 | return false;
1454 | }
1454 | } |
1455 | } |
1456 | |
1457 | if (wm->iketcp != IKE_TCP_NO && (wm->remote_tcpport == 0 || wm->remote_tcpport == 500)) { |
1458 | llog(RC_FATAL, c->logger, |
1459 | "failed to add connection: tcp-remoteport cannot be 0 or 500"); |
1460 | return false;
1461 | } |
1462 | |
1463 | /* we could complain about a lot more whack strings */ |
1464 | if (NEVER_NEGOTIATE(wm->policy)) {
1465 | if (wm->ike != NULL) {
1466 | llog(RC_INFORMATIONAL, c->logger,
1467 | "ignored ike= option for type=passthrough connection");
1468 | }
1469 | if (wm->esp != NULL) {
1470 | llog(RC_INFORMATIONAL, c->logger, |
1471 | "ignored esp= option for type=passthrough connection"); |
1472 | } |
1473 | if (wm->iketcp != IKE_TCP_NO) { |
1474 | llog(RC_INFORMATIONAL, c->logger, |
1475 | "ignored enable-tcp= option for type=passthrough connection"); |
1476 | } |
1477 | if (wm->left.authby != AUTHBY_UNSET || wm->right.authby != AUTHBY_UNSET) { |
1478 | llog(RC_FATAL, c->logger, |
1479 | "failed to add connection: leftauth= / rightauth= options are invalid for type=passthrough connection"); |
1480 | return false;
1481 | } |
1482 | } else { |
1483 | /* reject all bad combinations of authby with leftauth=/rightauth= */ |
1484 | if (wm->left.authby != AUTHBY_UNSET || wm->right.authby != AUTHBY_UNSET) { |
1485 | if (c->ike_version == IKEv1) { |
1486 | llog(RC_FATAL, c->logger, |
1487 | "failed to add connection: leftauth= and rightauth= require ikev2"); |
1488 | return false;
1489 | } |
1490 | if (wm->left.authby == AUTHBY_UNSET || wm->right.authby == AUTHBY_UNSET) { |
1491 | llog(RC_FATAL, c->logger, |
1492 | "failed to add connection: leftauth= and rightauth= must both be set or both be unset"); |
1493 | return false;
1494 | } |
1495 | /* ensure no conflicts of set left/rightauth with (set or unset) authby= */ |
1496 | if (wm->left.authby == wm->right.authby) { |
1497 | |
1498 | lset_t auth_pol = (wm->policy & POLICY_ID_AUTH_MASK);
1499 | const char *conflict = NULL;
1500 | switch (wm->left.authby) {
1501 | case AUTHBY_PSK:
1502 | if (auth_pol != POLICY_PSK && auth_pol != LEMPTY) {
1503 | conflict = "leftauthby=secret but authby= is not secret";
1504 | }
1505 | break;
1506 | case AUTHBY_NULL:
1507 | if (auth_pol != POLICY_AUTH_NULL && auth_pol != LEMPTY) {
1508 | conflict = "leftauthby=null but authby= is not null";
1509 | }
1510 | break;
1511 | case AUTHBY_NEVER:
1512 | if ((wm->policy & POLICY_ID_AUTH_MASK) != LEMPTY) {
1513 | conflict = "leftauthby=never but authby= is not never - double huh?"; |
1514 | } |
1515 | break; |
1516 | case AUTHBY_RSASIG: |
1517 | case AUTHBY_ECDSA: |
1518 | /* will be fixed later below */ |
1519 | break; |
1520 | default: |
1521 | bad_case(wm->left.authby);
1522 | }
1523 | if (conflict != NULL) {
1524 | policy_buf pb; |
1525 | llog(RC_FATAL, c->logger, |
1526 | "failed to add connection: leftauth=%s and rightauth=%s conflict with authby=%s, %s", |
1527 | enum_name(&keyword_authby_names, wm->left.authby), |
1528 | enum_name(&keyword_authby_names, wm->right.authby), |
1529 | str_policy(wm->policy & POLICY_ID_AUTH_MASK, &pb),
1530 | conflict);
1531 | return false;
1532 | } |
1533 | } else { |
1534 | if ((wm->left.authby == AUTHBY_PSK && wm->right.authby == AUTHBY_NULL) || |
1535 | (wm->left.authby == AUTHBY_NULL && wm->right.authby == AUTHBY_PSK)) { |
1536 | llog(RC_FATAL, c->logger, |
1537 | "failed to add connection: cannot mix PSK and NULL authentication (leftauth=%s and rightauth=%s)", |
1538 | enum_name(&keyword_authby_names, wm->left.authby), |
1539 | enum_name(&keyword_authby_names, wm->right.authby)); |
1540 | return false;
1541 | } |
1542 | } |
1543 | } |
1544 | } |
1545 | |
1546 | if (protoport_has_any_port(&wm->right.protoport) && |
1547 | protoport_has_any_port(&wm->left.protoport)) { |
1548 | llog(RC_FATAL, c->logger, |
1549 | "failed to add connection: cannot have protoport with %%any on both sides"); |
1550 | return false;
1551 | } |
1552 | |
1553 | d = check_connection_end(&wm->right, &wm->left, wm); |
1554 | if (d != NULL) {
1555 | llog_diag(RC_FATAL, c->logger, &d, "failed to add connection: ");
1556 | return false;
1557 | }
1558 | d = check_connection_end(&wm->left, &wm->right, wm);
1559 | if (d != NULL) {
1560 | llog_diag(RC_FATAL, c->logger, &d, "failed to add connection: ");
1561 | return false;
1562 | } |
1563 | |
1564 | /* duplicate any alias, adding spaces to the beginning and end */ |
1565 | c->connalias = clone_str(wm->connalias, "connection alias");
1566 |
1567 | c->dnshostname = clone_str(wm->dnshostname, "connection dnshostname");
1568 | c->policy = wm->policy;
1569 | /* ignore IKEv2 ECDSA and legacy RSA policies for IKEv1 connections */
1570 | if (c->ike_version == IKEv1)
1571 | c->policy = (c->policy & ~(POLICY_ECDSA | POLICY_RSASIG_v1_5));
1572 | /* ignore symmetrical defaults if we got left/rightauth */
1573 | if (wm->left.authby != wm->right.authby)
1574 | c->policy = c->policy & ~POLICY_ID_AUTH_MASK;
1575 | /* remove default pubkey policy if left/rightauth is specified */
1576 | if (wm->left.authby == wm->right.authby) {
1577 | if (wm->left.authby == AUTHBY_RSASIG)
1578 | c->policy = (c->policy & ~(POLICY_ECDSA));
1579 | if (wm->left.authby == AUTHBY_ECDSA)
1580 | c->policy = (c->policy & ~(POLICY_RSASIG | POLICY_RSASIG_v1_5));
1581 | } |
1582 | /* ignore supplied sighash and ECDSA (from defaults) for IKEv1 */ |
1583 | if (c->ike_version == IKEv2) |
1584 | c->sighash_policy = wm->sighash_policy; |
1585 | |
1586 | if (NEVER_NEGOTIATE(c->policy)) {
1587 | /* cleanup inherited default */ |
1588 | c->ike_version = 0; |
1589 | c->iketcp = IKE_TCP_NO; |
1590 | c->remote_tcpport = 0; |
1591 | } |
1592 | |
1593 | if (libreswan_fipsmode()) { |
1594 | if (c->policy & POLICY_NEGO_PASS) {
1595 | c->policy &= ~POLICY_NEGO_PASS;
1596 | llog(RC_LOG_SERIOUS, c->logger,
1597 | "FIPS: ignored negotiationshunt=passthrough - packets MUST be blocked in FIPS mode");
1598 | }
1599 | if ((c->policy & POLICY_FAIL_MASK) == POLICY_FAIL_PASS) {
1600 | c->policy &= ~POLICY_FAIL_MASK;
1601 | c->policy |= POLICY_FAIL_NONE;
1602 | llog(RC_LOG_SERIOUS, c->logger, |
1603 | "FIPS: ignored failureshunt=passthrough - packets MUST be blocked in FIPS mode"); |
1604 | } |
1605 | } |
1606 | |
1607 | policy_buf pb; |
1608 | dbg("added new %s connection %s with policy %s%s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("added new %s connection %s with policy %s%s", enum_name(&ike_version_names, c->ike_version), c-> name, str_policy(c->policy, &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX)) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t)0))) ? "+NEVER_NEGOTIATE" : ""); } } |
1609 | enum_name(&ike_version_names, c->ike_version),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("added new %s connection %s with policy %s%s", enum_name(&ike_version_names, c->ike_version), c-> name, str_policy(c->policy, &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX)) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t)0))) ? "+NEVER_NEGOTIATE" : ""); } } |
1610 | c->name, str_policy(c->policy, &pb),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("added new %s connection %s with policy %s%s", enum_name(&ike_version_names, c->ike_version), c-> name, str_policy(c->policy, &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX)) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t)0))) ? "+NEVER_NEGOTIATE" : ""); } } |
1611 | NEVER_NEGOTIATE(c->policy) ? "+NEVER_NEGOTIATE" : ""){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("added new %s connection %s with policy %s%s", enum_name(&ike_version_names, c->ike_version), c-> name, str_policy(c->policy, &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX)) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t)0))) ? "+NEVER_NEGOTIATE" : ""); } }; |
1612 | |
1613 | if (NEVER_NEGOTIATE(wm->policy)) {
1614 | /* set default to AUTHBY_NEVER if unset and we do not expect to do IKE */
1615 | if (wm->left.authby == AUTHBY_UNSET && wm->right.authby == AUTHBY_UNSET) {
1616 | if ((c->policy & POLICY_ID_AUTH_MASK) == LEMPTY) {
1617 | /* authby= was also not specified - fill in default */
1618 | c->policy |= POLICY_AUTH_NEVER;
1619 | policy_buf pb;
1620 | dbg("no AUTH policy was set for type=passthrough - defaulting to %s",
1621 | str_policy(c->policy & POLICY_ID_AUTH_MASK, &pb));
1622 | } |
1623 | } |
1624 | } else { |
1625 | /* set default to RSASIG if unset and we expect to do IKE */ |
1626 | if (wm->left.authby == AUTHBY_UNSET && wm->right.authby == AUTHBY_UNSET) { |
1627 | if ((c->policy & POLICY_ID_AUTH_MASK) == LEMPTY) {
1628 | /* authby= was also not specified - fill in default */
1629 | c->policy |= POLICY_DEFAULT;
1630 | policy_buf pb;
1631 | dbg("no AUTH policy was set - defaulting to %s",
1632 | str_policy(c->policy & POLICY_ID_AUTH_MASK, &pb));
1633 | } |
1634 | } |
1635 | |
1636 | /* fixup symmetric policy flags based on asymmetric ones */ |
1637 | if ((wm->left.authby == AUTHBY_NULL && wm->right.authby == AUTHBY_RSASIG) || |
1638 | (wm->left.authby == AUTHBY_RSASIG && wm->right.authby == AUTHBY_NULL)) { |
1639 | c->policy |= POLICY_RSASIG;
1640 | }
1641 | if ((wm->left.authby == AUTHBY_NULL && wm->right.authby == AUTHBY_ECDSA) ||
1642 | (wm->left.authby == AUTHBY_ECDSA && wm->right.authby == AUTHBY_NULL)) {
1643 | c->policy |= POLICY_ECDSA;
1644 | } |
1645 | |
1646 | /* IKE cipher suites */ |
1647 | |
1648 | if (!LIN(POLICY_AUTH_NEVER, wm->policy) &&
1649 | (wm->ike != NULL || c->ike_version == IKEv2)) {
1650 | const struct proposal_policy proposal_policy = {
1651 | /* logic needs to match pick_initiator() */
1652 | .version = c->ike_version,
1653 | .alg_is_ok = ike_alg_is_ike,
1654 | .pfs = LIN(POLICY_PFS, wm->policy),
1655 | .check_pfs_vs_dh = false,
1656 | .logger_rc_flags = ALL_STREAMS|RC_LOG,
1657 | .logger = c->logger, /* on-stack */
1658 | /* let defaults stumble on regardless */
1659 | .ignore_parser_errors = (wm->ike == NULL),
1660 | }; |
1661 | |
1662 | struct proposal_parser *parser = ike_proposal_parser(&proposal_policy); |
1663 | c->ike_proposals.p = proposals_from_str(parser, wm->ike); |
1664 | |
1665 | if (c->ike_proposals.p == NULL) {
1666 | pexpect(parser->diag != NULL); /* something */
1667 | llog_diag(RC_FATAL, c->logger, &parser->diag,
1668 | "failed to add connection: ");
1669 | free_proposal_parser(&parser);
1670 | /* caller will free C */
1671 | return false;
1672 | } |
1673 | free_proposal_parser(&parser); |
1674 | |
1675 | /* from here on, error returns should alg_info_free(&c->ike_proposals->ai); */ |
1676 | |
1677 | LSWDBGP(DBG_BASE, buf) {
1678 | jam_string(buf, "ike (phase1) algorithm values: "); |
1679 | jam_proposals(buf, c->ike_proposals.p); |
1680 | } |
1681 | } |
1682 | |
1683 | /* ESP or AH cipher suites (but not both) */ |
1684 | |
1685 | if (wm->esp != NULL ||
1686 | (c->ike_version == IKEv2 &&
1687 | (c->policy & (POLICY_ENCRYPT|POLICY_AUTHENTICATE)))) {
1688 | const char *esp = wm->esp != NULL ? wm->esp : "";
1689 | dbg("from whack: got --esp=%s", esp);
1690 | |
1691 | const struct proposal_policy proposal_policy = { |
1692 | /* |
1693 | * logic needs to match pick_initiator() |
1694 | * |
1695 | * XXX: Once pluto is changed to IKEv1 XOR |
1696 | * IKEv2 it should be possible to move this |
1697 | * magic into pluto proper and instead pass a |
1698 | * simple boolean. |
1699 | */ |
1700 | .version = c->ike_version, |
1701 | .alg_is_ok = kernel_alg_is_ok, |
1702 | .pfs = LIN(POLICY_PFS, wm->policy),
1703 | .check_pfs_vs_dh = true,
1704 | .logger_rc_flags = ALL_STREAMS|RC_LOG,
1705 | .logger = c->logger, /* on-stack */
1706 | /* let defaults stumble on regardless */
1707 | .ignore_parser_errors = (wm->esp == NULL),
1708 | }; |
1709 | |
1710 | /* |
1711 | * We checked above that exactly one of |
1712 | * POLICY_ENCRYPT and POLICY_AUTHENTICATE is on. |
1713 | * The only difference in processing is which |
1714 | * function is called (and those functions are |
1715 | * almost identical). |
1716 | */ |
1717 | struct proposal_parser *(*fn)(const struct proposal_policy *policy) = |
1718 | (c->policy & POLICY_ENCRYPT) ? esp_proposal_parser :
1719 | (c->policy & POLICY_AUTHENTICATE) ? ah_proposal_parser :
1720 | NULL;
1721 | passert(fn != NULL);
1722 | struct proposal_parser *parser = fn(&proposal_policy);
1723 | c->child_proposals.p = proposals_from_str(parser, wm->esp);
1724 | if (c->child_proposals.p == NULL) {
1725 | pexpect(parser->diag != NULL);
1726 | llog_diag(RC_FATAL, c->logger, &parser->diag,
1727 | "failed to add connection: ");
1728 | free_proposal_parser(&parser);
1729 | /* caller will free C */
1730 | return false;
1731 | } |
1732 | free_proposal_parser(&parser); |
1733 | |
1734 | /* from here on, error returns should alg_info_free(&c->child_proposals->ai); */ |
1735 | |
1736 | LSWDBGP(DBG_BASE, buf) {
1737 | jam_string(buf, "ESP/AH string values: "); |
1738 | jam_proposals(buf, c->child_proposals.p); |
1739 | }; |
1740 | } |
1741 | |
1742 | c->nic_offload = wm->nic_offload; |
1743 | c->sa_ike_life_seconds = wm->sa_ike_life_seconds; |
1744 | c->sa_ipsec_life_seconds = wm->sa_ipsec_life_seconds; |
1745 | c->sa_rekey_margin = wm->sa_rekey_margin; |
1746 | c->sa_rekey_fuzz = wm->sa_rekey_fuzz; |
1747 | c->sa_keying_tries = wm->sa_keying_tries; |
1748 | c->sa_replay_window = wm->sa_replay_window; |
1749 | config->retransmit_timeout = wm->retransmit_timeout; |
1750 | config->retransmit_interval = wm->retransmit_interval; |
1751 | |
1752 | if (deltatime_cmp(c->sa_rekey_margin, >=, c->sa_ipsec_life_seconds)) {
1753 | deltatime_t new_rkm = deltatimescale(1, 2, c->sa_ipsec_life_seconds); |
1754 | |
1755 | llog(RC_LOG, c->logger, |
1756 | "rekeymargin (%jds) >= salifetime (%jds); reducing rekeymargin to %jds seconds", |
1757 | deltasecs(c->sa_rekey_margin), |
1758 | deltasecs(c->sa_ipsec_life_seconds), |
1759 | deltasecs(new_rkm)); |
1760 | |
1761 | c->sa_rekey_margin = new_rkm; |
1762 | } |
1763 | |
1764 | { |
1765 | /* http://csrc.nist.gov/publications/nistpubs/800-77/sp800-77.pdf */ |
1766 | time_t max_ike = libreswan_fipsmode() ? FIPS_IKE_SA_LIFETIME_MAXIMUM : IKE_SA_LIFETIME_MAXIMUM;
1767 | time_t max_ipsec = libreswan_fipsmode() ? FIPS_IPSEC_SA_LIFETIME_MAXIMUM : IPSEC_SA_LIFETIME_MAXIMUM;
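     | /* per the macro expansions shown in this report: the FIPS caps are 24 hours
     |  * (IKE SA) and 8 hours (IPsec SA); the non-FIPS caps are one day each. */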
1768 | |
1769 | if (deltasecs(c->sa_ike_life_seconds) > max_ike) { |
1770 | llog(RC_LOG_SERIOUS, c->logger, |
1771 | "IKE lifetime limited to the maximum allowed %jds", |
1772 | (intmax_t) max_ike); |
1773 | c->sa_ike_life_seconds = deltatime(max_ike); |
1774 | } |
1775 | if (deltasecs(c->sa_ipsec_life_seconds) > max_ipsec) { |
1776 | llog(RC_LOG_SERIOUS, c->logger, |
1777 | "IPsec lifetime limited to the maximum allowed %jds", |
1778 | (intmax_t) max_ipsec); |
1779 | c->sa_ipsec_life_seconds = deltatime(max_ipsec); |
1780 | } |
1781 | } |
1782 | |
1783 | /* RFC 3706 DPD */ |
1784 | c->dpd_delay = wm->dpd_delay; |
1785 | c->dpd_timeout = wm->dpd_timeout; |
1786 | c->dpd_action = wm->dpd_action; |
1787 | |
1788 | /* Cisco interop: remote peer type */ |
1789 | c->remotepeertype = wm->remotepeertype; |
1790 | |
1791 | c->metric = wm->metric; |
1792 | c->connmtu = wm->connmtu; |
1793 | c->encaps = wm->encaps; |
1794 | c->nat_keepalive = wm->nat_keepalive; |
1795 | c->ikev1_natt = wm->ikev1_natt; |
1796 | c->initial_contact = wm->initial_contact; |
1797 | c->cisco_unity = wm->cisco_unity; |
1798 | c->fake_strongswan = wm->fake_strongswan; |
1799 | c->send_vendorid = wm->send_vendorid; |
1800 | c->send_ca = wm->send_ca; |
1801 | c->xauthby = wm->xauthby; |
1802 | c->xauthfail = wm->xauthfail; |
1803 | |
1804 | c->modecfg_dns = clone_str(wm->modecfg_dns, "connection modecfg_dns");
1805 | c->modecfg_domains = clone_str(wm->modecfg_domains, "connection modecfg_domains");
1806 | c->modecfg_banner = clone_str(wm->modecfg_banner, "connection modecfg_banner");
1807 | |
1808 | /* RFC 5685 - IKEv2 Redirect mechanism */ |
1809 | c->redirect_to = clone_str(wm->redirect_to, "connection redirect_to");
1810 | c->accept_redirect_to = clone_str(wm->accept_redirect_to, "connection accept_redirect_to");
1811 | |
1812 | /* RFC 8229 TCP encap*/ |
1813 | c->remote_tcpport = wm->remote_tcpport; |
1814 | c->iketcp = wm->iketcp; |
1815 | |
1816 | /* |
1817 | * parse mark and mask values from the mark/mask string
1818 | * acceptable string formats are |
1819 | * ( -1 | <nat> | <hex> ) [ / ( <nat> | <hex> ) ] |
1820 | * examples: |
1821 | * 10 |
1822 | * 10/0xffffffff |
1823 | * 0xA/0xFFFFFFFF |
1824 | * |
1825 | * defaults: |
1826 | * if mark is provided and mask is not, mask will default to 0xFFFFFFFF
1827 | * if nothing is provided mark and mask are set to 0; |
1828 | */ |
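     | /* illustrative results of mark_parse() for the formats above (derived from the
     |  * parser itself, not from confread):
     |  *   "-1"             -> unique=true,  val/mask left at the 0xffffffff defaults
     |  *   "10"             -> unique=false, val=10,  mask=0xffffffff
     |  *   "0xA/0xFFFFFFFF" -> unique=false, val=0xA, mask=0xffffffff
     |  */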
1829 | |
1830 | /* mark-in= and mark-out= overwrite mark= */ |
1831 | if (wm->conn_mark_both != NULL) {
1832 | mark_parse(wm->conn_mark_both, &c->sa_marks.in, c->logger);
1833 | mark_parse(wm->conn_mark_both, &c->sa_marks.out, c->logger);
1834 | if (wm->conn_mark_in != NULL || wm->conn_mark_out != NULL) {
1835 | llog(RC_LOG_SERIOUS, c->logger,
1836 | "conflicting mark specifications");
1837 | }
1838 | }
1839 | if (wm->conn_mark_in != NULL)
1840 | mark_parse(wm->conn_mark_in, &c->sa_marks.in, c->logger);
1841 | if (wm->conn_mark_out != NULL)
1842 | mark_parse(wm->conn_mark_out, &c->sa_marks.out, c->logger);
1843 | |
1844 | c->vti_iface = clone_str(wm->vti_iface, "connection vti_iface");
1845 | c->vti_routing = wm->vti_routing; |
1846 | c->vti_shared = wm->vti_shared; |
1847 | #ifdef USE_XFRM_INTERFACE
1848 | if (wm->xfrm_if_id != UINT32_MAX) {
1849 | err_t err = xfrm_iface_supported(c->logger);
1850 | if (err == NULL) {
1851 | if (!setup_xfrm_interface(c, wm->xfrm_if_id == 0 ?
1852 | PLUTO_XFRMI_REMAP_IF_ID_ZERO : wm->xfrm_if_id ))
1853 | return false;
1854 | } else { |
1855 | llog(RC_FATAL, c->logger, |
1856 | "failed to add connection: ipsec-interface=%u not supported. %s", |
1857 | wm->xfrm_if_id, err); |
1858 | return false;
1859 | } |
1860 | } |
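     | /* note: the expansion shown in this report gives PLUTO_XFRMI_REMAP_IF_ID_ZERO == 16384,
     |  * i.e. ipsec-interface=0 is remapped to interface id 16384. */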
1861 | #endif |
1862 | } |
1863 | |
1864 | #ifdef HAVE_NM
1865 | c->nmconfigured = wm->nmconfigured; |
1866 | #endif |
1867 | |
1868 | c->nflog_group = wm->nflog_group; |
1869 | c->sa_priority = wm->sa_priority; |
1870 | c->sa_tfcpad = wm->sa_tfcpad; |
1871 | c->send_no_esp_tfc = wm->send_no_esp_tfc; |
1872 | |
1873 | /* |
1874 | * Since security labels use the same REQID for everything, |
1875 | * pre-assign it. |
1876 | */ |
1877 | c->sa_reqid = (wm->sa_reqid != 0 ? wm->sa_reqid : |
1878 | wm->ike_version != IKEv2 ? /*generated later*/0 : |
1879 | wm->sec_label != NULL ? gen_reqid() :
1880 | /*generated later*/0);
1881 | dbg("%s c->sa_reqid=%d because wm->sa_reqid=%d and sec-label=%s",
1882 | c->name, c->sa_reqid, wm->sa_reqid,
1883 | (wm->ike_version != IKEv2 ? "not-IKEv2" :
1884 | wm->sec_label != NULL ? wm->sec_label :
1885 | "n/a"));
1886 | |
1887 | /* |
1888 | * Set both end's sec_label to the same value. |
1889 | */ |
1890 | |
1891 | if (wm->sec_label != NULL) {
1892 | dbg("received sec_label '%s' from whack", wm->sec_label);
1893 | /* include NUL! */ |
1894 | shunk_t sec_label = shunk2(wm->sec_label, strlen(wm->sec_label)+1); |
1895 | err_t ugh = vet_seclabel(sec_label); |
1896 | if (ugh != NULL) {
1897 | llog(RC_LOG_SERIOUS, c->logger, "failed to add connection: %s: policy-label=%s",
1898 | ugh, wm->sec_label);
1899 | return false;
1900 | }
1901 | config->sec_label = clone_hunk(sec_label, "struct config sec_label");
1902 | } |
1903 | |
1904 | |
1905 | /* |
1906 | * At this point THIS and THAT are disoriented so |
1907 | * distinguishing one as local and the other as remote is |
1908 | * pretty much meaningless. |
1909 | * |
1910 | * Somewhat arbitrarily (as in this is the way it's always
1911 | * been) start with: |
1912 | * |
1913 | * LEFT == LOCAL / THIS |
1914 | * RIGHT == REMOTE / THAT |
1915 | * |
1916 | * XXX: This is all too confusing - wouldn't it be simpler if |
1917 | * there was a '.left' and '.right' (or even .end[2] - this |
1918 | * code seems to be crying out for a for loop) and then having |
1919 | * orient() set up .local and .remote pointers or indexes |
1920 | * accordingly? |
1921 | */ |
1922 | |
1923 | for (enum left_right end_index = 0; end_index < elemsof(config->end); end_index++) {
1924 | struct config_end *config_end = &config->end[end_index]; |
1925 | config_end->end_index = end_index; |
1926 | config_end->leftright = (end_index == LEFT_END ? "left" : |
1927 | end_index == RIGHT_END ? "right" : |
1928 | NULL);
1929 | passert(config_end->leftright != NULL);
1930 | } |
1931 | |
1932 | struct end *left = &c->spd.this; |
1933 | struct end *right = &c->spd.that; |
1934 | left->config = c->local = &config->end[LEFT_END]; |
1935 | right->config = c->remote = &config->end[RIGHT_END]; |
1936 | |
1937 | int same_leftca = extract_end(c, left, &config->end[LEFT_END], &wm->left, |
1938 | /*other_end*/right, c->logger); |
1939 | if (same_leftca < 0) { |
1940 | return false;
1941 | } |
1942 | |
1943 | int same_rightca = extract_end(c, right, &config->end[RIGHT_END], &wm->right, |
1944 | /*other_end*/left, c->logger); |
1945 | if (same_rightca < 0) { |
1946 | return false;
1947 | } |
1948 | |
1949 | if (same_rightca == 1) { |
1950 | c->spd.that.ca = clone_hunk(c->spd.this.ca, "same rightca");
1951 | } else if (same_leftca == 1) {
1952 | c->spd.this.ca = clone_hunk(c->spd.that.ca, "same leftca");
1953 | } |
1954 | |
1955 | if (c->spd.this.xauth_server || c->spd.that.xauth_server) |
1956 | c->policy |= POLICY_XAUTH;
1957 | |
1958 | update_ends_from_this_host_addr(&c->spd.this, &c->spd.that); |
1959 | update_ends_from_this_host_addr(&c->spd.that, &c->spd.this); |
1960 | |
1961 | /* |
1962 | * If both left/rightauth is unset, fill it in with (preferred) symmetric policy |
1963 | */ |
1964 | if (wm->left.authby == AUTHBY_UNSET && wm->right.authby == AUTHBY_UNSET) { |
1965 | if (c->policy & POLICY_RSASIG)
1966 | c->spd.this.authby = c->spd.that.authby = AUTHBY_RSASIG;
1967 | else if (c->policy & POLICY_ECDSA)
1968 | c->spd.this.authby = c->spd.that.authby = AUTHBY_ECDSA;
1969 | else if (c->policy & POLICY_PSK)
1970 | c->spd.this.authby = c->spd.that.authby = AUTHBY_PSK;
1971 | else if (c->policy & POLICY_AUTH_NULL)
1972 | c->spd.this.authby = c->spd.that.authby = AUTHBY_NULL;
1973 | } |
1974 | |
1975 | /* if left/rightauth are set, but symmetric policy is not, fill it in */ |
1976 | if (wm->left.authby == wm->right.authby) { |
1977 | switch (wm->left.authby) { |
1978 | case AUTHBY_RSASIG: |
1979 | c->policy |= POLICY_RSASIG;
1980 | break;
1981 | case AUTHBY_ECDSA:
1982 | c->policy |= POLICY_ECDSA;
1983 | break;
1984 | case AUTHBY_PSK:
1985 | c->policy |= POLICY_PSK;
1986 | break;
1987 | case AUTHBY_NULL:
1988 | c->policy |= POLICY_AUTH_NULL;
1989 | break; |
1990 | default: |
1991 | break; |
1992 | } |
1993 | } |
1994 | |
1995 | /* |
1996 | * force any wildcard host IP address, any wildcard subnet |
1997 | * or any wildcard ID to _that_ end |
1998 | */ |
1999 | if (address_is_unset(&c->spd.this.host_addr) || |
2000 | address_is_any(c->spd.this.host_addr) || |
2001 | c->spd.this.has_port_wildcard || |
2002 | c->spd.this.has_id_wildcards) { |
2003 | struct end t = c->spd.this; |
2004 | |
2005 | c->spd.this = c->spd.that; |
2006 | c->spd.that = t; |
2007 | } |
2008 | |
2009 | c->spd.spd_next = NULL;
2010 |
2011 | /* set internal fields */
2012 | c->instance_serial = 0;
2013 | c->interface = NULL;
2014 | c->spd.routing = RT_UNROUTED;
2015 | c->newest_ike_sa = SOS_NOBODY;
2016 | c->newest_ipsec_sa = SOS_NOBODY;
2017 | c->spd.eroute_owner = SOS_NOBODY;
2018 | c->temp_vars.num_redirects = 0; |
2019 | /* |
2020 | * is spd.reqid necessary for all c? CK_INSTANCE or CK_PERMANENT |
2021 | * need one. Does CK_TEMPLATE need one? |
2022 | */ |
2023 | c->spd.reqid = c->sa_reqid == 0 ? gen_reqid() : c->sa_reqid; |
2024 | dbg("%s c->spd.reqid=%d because c->sa_reqid=%d",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("%s c->spd.reqid=%d because c->sa_reqid=%d" , c->name, c->spd.reqid, c->sa_reqid); } } |
2025 | c->name, c->spd.reqid, c->sa_reqid){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("%s c->spd.reqid=%d because c->sa_reqid=%d" , c->name, c->spd.reqid, c->sa_reqid); } }; |
2026 | |
2027 | /* force all oppo connections to have a client */ |
2028 | if (c->policy & POLICY_OPPORTUNISTIC) {
2029 | c->spd.that.has_client = true;
2030 | c->spd.that.client.maskbits = 0; /* ??? shouldn't this be 32 for v4? */ |
2031 | /* |
2032 | * We cannot have unlimited keyingtries for Opportunistic, or else |
2033 | * we gain infinite partial IKE SA's. But also, more than one makes |
2034 | * no sense, since it will be installing a failureshunt (not |
2035 | * negotiationshunt) on the 2nd keyingtry, and try to re-install another |
2036 | * negotiation or failure shunt |
2037 | */ |
2038 | if (c->sa_keying_tries == 0) { |
2039 | c->sa_keying_tries = 1; |
2040 | llog(RC_LOG, c->logger, |
2041 | "the connection is Opportunistic, but used keyingtries=0. The specified value was changed to 1"); |
2042 | } |
2043 | } |
2044 | |
2045 | if (c->policy & POLICY_GROUP) {
2046 | dbg("connection is group: by policy");
2047 | c->kind = CK_GROUP;
2048 | add_group(c);
2049 | } else if (!NEVER_NEGOTIATE(c->policy) && (address_is_unset(&c->spd.that.host_addr) ||
2050 | address_is_any(c->spd.that.host_addr))) {
2051 | dbg("connection is template: no remote address yet policy negotiate");
2052 | c->kind = CK_TEMPLATE;
2053 | } else if (c->spd.that.has_port_wildcard) {
2054 | dbg("connection is template: remote has wildcard port");
2055 | c->kind = CK_TEMPLATE; |
2056 | } else if (c->ike_version == IKEv2 && c->config->sec_label.len > 0) { |
2057 | dbg("connection is template: has security label: "PRI_SHUNK,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("connection is template: has security label: " "%.*s", ((int) (c->config->sec_label).len), (const char *) ((c->config->sec_label).ptr)); } } |
2058 | pri_shunk(c->config->sec_label)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("connection is template: has security label: " "%.*s", ((int) (c->config->sec_label).len), (const char *) ((c->config->sec_label).ptr)); } }; |
2059 | c->kind = CK_TEMPLATE; |
2060 | } else if (wm->left.virt != NULL((void*)0) || wm->right.virt != NULL((void*)0)) { |
2061 | /* |
2062 | * If we have a subnet=vnet: needing instantiation |
2063 | * so we can accept multiple subnets from |
2064 | * the remote peer. |
2065 | */ |
2066 | dbg("connection is template: there are vnets at play"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("connection is template: there are vnets at play" ); } }; |
2067 | c->kind = CK_TEMPLATE; |
2068 | } else if (c->policy & POLICY_IKEV2_ALLOW_NARROWING((lset_t)1 << (POLICY_IKEV2_ALLOW_NARROWING_IX))) { |
2069 | dbg("connection is template: POLICY_IKEV2_ALLOW_NARROWING"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("connection is template: POLICY_IKEV2_ALLOW_NARROWING" ); } }; |
2070 | c->kind = CK_TEMPLATE; |
2071 | } else { |
2072 | dbg("connection is permanent: by default"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("connection is permanent: by default"); } }; |
2073 | c->kind = CK_PERMANENT; |
2074 | } |
2075 | |
2076 | set_policy_prio(c); /* must be after kind is set */ |
2077 | |
2078 | c->extra_debugging = wm->debugging; |
2079 | |
2080 | /* at most one virt can be present */ |
2081 | passert(wm->left.virt == NULL || wm->right.virt == NULL);
2082 |
2083 | if (wm->left.virt != NULL || wm->right.virt != NULL) {
2084 | /* |
2085 | * This now happens with wildcards on |
2086 | * non-instantiations, such as rightsubnet=vnet:%priv |
2087 | * or rightprotoport=17/%any |
2088 | * passert(address_is_unset(&c->spd.that.host_addr) || address_is_any(c->spd.that.host_addr)); |
2089 | */ |
2090 | passert(c->spd.that.virt == NULL);
2091 | c->spd.that.virt = create_virtual(wm->left.virt != NULL ?
2092 | wm->left.virt :
2093 | wm->right.virt,
2094 | c->logger);
2095 | if (c->spd.that.virt != NULL)
2096 | c->spd.that.has_client = true;
2097 | } |
2098 | |
2099 | if (c->pool != NULL)
2100 | reference_addresspool(c); |
2101 | |
2102 | orient(c, c->logger); |
2103 | |
2104 | connect_to_host_pair(c); |
2105 | /* non configurable */ |
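     | /* the expansion shown in this report gives IKE_V2_OVERLAPPING_WINDOW_SIZE == 1 */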
2106 | c->ike_window = IKE_V2_OVERLAPPING_WINDOW_SIZE;
2107 | return true;
2108 | } |
2109 | |
2110 | /* slightly different names compared to pluto_constants.c */ |
2111 | static const char *const policy_shunt_names[4] = { |
2112 | "trap[should not happen]", |
2113 | "passthrough", |
2114 | "drop", |
2115 | "reject", |
2116 | }; |
2117 | |
2118 | void add_connection(const struct whack_message *wm, struct logger *logger) |
2119 | { |
2120 | /* |
2121 | * Check for duplicate before allocating; otherwise the lookup
2122 | * will return the just allocated connection missing the |
2123 | * original. |
2124 | */ |
2125 | if (conn_by_name(wm->name, false/*!strict*/) != NULL) {
2126 | llog(RC_DUPNAME, logger, |
2127 | "attempt to redefine connection \"%s\"", wm->name); |
2128 | return; |
2129 | } |
2130 | |
2131 | struct connection *c = alloc_connection(wm->name, HERE);
2132 | /* XXX: something better? */
2133 | close_any(&c->logger->global_whackfd);
2134 | c->logger->global_whackfd = fd_dup(logger->global_whackfd, HERE);
2135 | |
2136 | if (!extract_connection(wm, c)) { |
2137 | /* already logged */ |
2138 | discard_connection(&c, false/*not-valid*/);
2139 | return; |
2140 | } |
2141 | |
2142 | /* log all about this connection */ |
2143 | const char *what = (NEVER_NEGOTIATE(c->policy) ? policy_shunt_names[(c->policy & POLICY_SHUNT_MASK) >> POLICY_SHUNT_SHIFT] :
2144 | c->ike_version == IKEv1 ? "IKEv1" : |
2145 | c->ike_version == IKEv2 ? "IKEv2" : |
2146 | "IKEv?"); |
2147 | /* connection is good-to-go: log against it */ |
2148 | llog(RC_LOG, c->logger, "added %s connection", what); |
2149 | policy_buf pb; |
2150 | dbg("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2151 | deltasecs(c->sa_ike_life_seconds),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2152 | deltasecs(c->sa_ipsec_life_seconds),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2153 | deltasecs(c->sa_rekey_margin),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2154 | c->sa_rekey_fuzz,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2155 | c->sa_keying_tries,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2156 | c->sa_replay_window,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2157 | str_policy(c->policy, &pb),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } } |
2158 | NEVER_NEGOTIATE(c->policy) ? "+NEVER_NEGOTIATE" : ""){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("ike_life: %jd; ipsec_life: %jds; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu; replay_window: %u; policy: %s%s" , deltasecs(c->sa_ike_life_seconds), deltasecs(c->sa_ipsec_life_seconds ), deltasecs(c->sa_rekey_margin), c->sa_rekey_fuzz, c-> sa_keying_tries, c->sa_replay_window, str_policy(c->policy , &pb), (((((c->policy)) & (((lset_t)1 << (POLICY_ENCRYPT_IX )) | ((lset_t)1 << (POLICY_AUTHENTICATE_IX)))) == ((lset_t )0))) ? "+NEVER_NEGOTIATE" : ""); } }; |
2159 | char topo[CONN_BUF_LEN(2 * ((sizeof(subnet_buf) + sizeof(address_buf) + sizeof(id_buf ) + sizeof(subnet_buf) + 10) - 1) + 4)]; |
2160 | dbg("%s", format_connection(topo, sizeof(topo), c, &c->spd)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("%s", format_connection(topo, sizeof(topo), c, &c->spd)); } }; |
2161 | /* XXX: something better? */ |
2162 | close_any(&c->logger->global_whackfd)close_any_fd((&c->logger->global_whackfd), ({ static const struct where here = { .func = __func__, .file = "programs/pluto/connections.c" , .line = 2162, }; &here; })); |
2163 | } |
2164 | |
2165 | /* |
2166 | * Derive a template connection from a group connection and target. |
2167 | * Similar to instantiate(). Happens at whack --listen. Returns name |
2168 | * of new connection. NULL on failure (duplicated name). Caller is |
2169 | * responsible for pfreeing name. |
2170 | */ |
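| /*
|  * The manufactured name is "<group>#<target>" when no protoport is
|  * given, or "<group>#<target>-(<sport>--<proto>--<dport>)" when the
|  * foodgroup entry carries one; e.g. group "private" with target
|  * 10.0.1.0/24 yields something like "private#10.0.1.0/24".
|  */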
2171 | struct connection *add_group_instance(struct connection *group, |
2172 | const ip_selector *target, |
2173 | uint8_t proto, uint16_t sport, uint16_t dport)
2174 | { |
2175 | passert(group->kind == CK_GROUP);
2176 | passert(oriented(group));
2177 | |
2178 | /* |
2179 | * Manufacture a unique name for this template. |
2180 | */ |
2181 | char *namebuf; /* must free */ |
2182 | |
2183 | subnet_buf targetbuf; |
2184 | str_selector_subnet(target, &targetbuf); |
2185 | |
2186 | if (proto == 0) { |
2187 | namebuf = alloc_printf("%s#%s", group->name, targetbuf.buf); |
2188 | } else { |
2189 | namebuf = alloc_printf("%s#%s-(%d--%d--%d)", group->name, |
2190 | targetbuf.buf, sport, proto, dport); |
2191 | } |
2192 | |
2193 | if (conn_by_name(namebuf, false/*!strict*/) != NULL) {
2194 | llog(RC_DUPNAME, group->logger,
2195 | "group name + target yields duplicate name \"%s\"", namebuf);
2196 | pfreeany(namebuf);
2197 | return NULL;
2198 | }
2199 |
2200 | struct connection *t = clone_connection(namebuf, group, HERE);
2201 | passert(namebuf != t->name); /* see clone_connection() */
2202 | pfreeany(namebuf);
2203 | t->foodgroup = t->name; /* XXX: DANGER: unshare_connection() will clone this */ |
2204 | |
2205 | /* suppress virt before unsharing */ |
2206 | passert(t->spd.this.virt == NULL);
2207 |
2208 | pexpect(t->spd.spd_next == NULL); /* we only handle top spd */
2209 |
2210 | if (t->spd.that.virt != NULL) {
2211 | DBG_log("virtual_ip not supported in group instance; ignored");
2212 | virtual_ip_delref(&t->spd.that.virt, HERE);
2213 | } |
2214 | |
2215 | unshare_connection(t); |
2216 | passert(t->foodgroup != t->name); /* XXX: see DANGER above */
2217 | |
2218 | t->spd.that.client = *target; |
2219 | if (proto != 0) { |
2220 | /* if foodgroup entry specifies protoport, override protoport= settings */ |
2221 | t->spd.this.protocol = proto; |
2222 | t->spd.that.protocol = proto; |
2223 | t->spd.this.port = sport; |
2224 | t->spd.that.port = dport; |
2225 | } |
2226 | t->policy &= ~(POLICY_GROUP | POLICY_GROUTED);
2227 | t->policy |= POLICY_GROUPINSTANCE; /* mark as group instance for later */
2228 | t->kind = (address_is_unset(&t->spd.that.host_addr) || address_is_any(t->spd.that.host_addr)) &&
2229 | !NEVER_NEGOTIATE(t->policy) ?
2230 | CK_TEMPLATE : CK_INSTANCE; |
2231 | |
2232 | /* reset log file info */ |
2233 | t->log_file_name = NULL;
2234 | t->log_file = NULL;
2235 | t->log_file_err = false;
2236 |
2237 | t->spd.reqid = group->sa_reqid == 0 ? gen_reqid() : group->sa_reqid;
2238 | dbg("%s t->spd.reqid=%d because group->sa_reqid=%d",
2239 | t->name, t->spd.reqid, group->sa_reqid);
2240 | |
2241 | /* same host_pair as parent: stick after parent on list */ |
2242 | /* t->hp_next = group->hp_next; */ /* done by clone_thing */ |
2243 | group->hp_next = t; |
2244 | |
2245 | /* route if group is routed */ |
2246 | if (group->policy & POLICY_GROUTED) {
2247 | /* XXX: something better? */
2248 | close_any(&t->logger->global_whackfd);
2249 | t->logger->global_whackfd = fd_dup(group->logger->global_whackfd, HERE);
2250 | if (!trap_connection(t)) {
2251 | llog(WHACK_STREAM|RC_ROUTE, group->logger,
2252 | "could not route");
2253 | }
2254 | /* XXX: something better? */
2255 | close_any(&t->logger->global_whackfd);
2256 | } |
2257 | return t; |
2258 | } |
2259 | |
2260 | /* |
2261 | * Common part of instantiating a Road Warrior or Opportunistic connection. |
2262 | * peers_id can be used to carry over an ID discovered in Phase 1. |
2263 | * It must not disagree with the one in c, but if that is unspecified, |
2264 | * the new connection will use peers_id. |
2265 | * If peers_id is NULL, and c.that.id is uninstantiated (ID_NONE), the |
2266 | * new connection will continue to have an uninstantiated that.id. |
2267 | * Note: instantiation does not affect port numbers. |
2268 | * |
2269 | * Note that instantiate can only deal with a single SPD/eroute. |
2270 | */ |
2271 | struct connection *instantiate(struct connection *c, |
2272 | const ip_address *peer_addr, |
2273 | const struct id *peer_id, |
2274 | shunk_t sec_label) |
2275 | { |
2276 | passert(c->kind == CK_TEMPLATE);
2277 | passert(c->spd.spd_next == NULL);
2278 | |
2279 | /* |
2280 | * Is the new connection still a template? |
2281 | * |
2282 | * For instance, a responder with a template connection T with |
2283 | * both remote=%any and configuration sec_label will: |
2284 | * |
2285 | * - during IKE_SA_INIT, instantiate T with the remote |
2286 | * address; creating a new template T.IKE (since the |
2287 | * negotiated sec_label isn't known it is still a template) |
2288 | * |
2289 | * - during IKE_AUTH (or CREATE_CHILD_SA), instantiate T.IKE
2290 | * with the Child SA's negotiated SEC_LABEL creating the |
2291 | * connection instance C.CHILD |
2292 | */ |
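| /*
|  * In short: with a configured sec_label, an instantiation without a
|  * negotiated label stays a CK_TEMPLATE (T -> T.IKE), one carrying a
|  * label becomes a CK_INSTANCE (T.IKE -> C.CHILD); without a
|  * configured sec_label the result is always a CK_INSTANCE.
|  */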
2293 | enum connection_kind kind; |
2294 | if (c->config->sec_label.len > 0) { |
2295 | /* |
2296 | * Either: |
2297 | * |
2298 | * - C is T, and D is T.IKE (the remote address is |
2299 | * updated below) -> CK_TEMPLATE |
2300 | * |
2301 | * Or: |
2302 | * |
2303 | * - or C is T.IKE and D is C.CHILD (the sec_label is |
2304 | * updated below) -> CK_INSTANCE |
2305 | */ |
2306 | pexpect(address_is_specified(c->spd.that.host_addr) || peer_addr != NULL);
2307 | if (sec_label.len == 0) { |
2308 | kind = CK_TEMPLATE; |
2309 | } else { |
2310 | kind = CK_INSTANCE; |
2311 | } |
2312 | } else { |
2313 | /* pexpect(address_is_specified(c->spd.that.host_addr) || peer_addr != NULL); true??? */ |
2314 | kind = CK_INSTANCE; |
2315 | } |
2316 | |
2317 | c->instance_serial++; |
2318 | struct connection *d = clone_connection(c->name, c, HERE);
2319 | passert(c->name != d->name); /* see clone_connection() */
2320 | if (peer_id != NULL) {
2321 | int wildcards; /* value ignored */ |
2322 | |
2323 | passert(d->spd.that.id.kind == ID_FROMCERT || match_id(peer_id, &d->spd.that.id, &wildcards));
2324 | d->spd.that.id = *peer_id;
2325 | d->spd.that.has_id_wildcards = false;
2326 | } |
2327 | unshare_connection(d); |
2328 | d->kind = kind; |
2329 | passert(oriented(d));
2330 | if (peer_addr != NULL) {
2331 | d->spd.that.host_addr = *peer_addr; |
2332 | } |
2333 | update_ends_from_this_host_addr(&d->spd.that, &d->spd.this); |
2334 | |
2335 | /* |
2336 | * We cannot guess what our next_hop should be, but if it was |
2337 | * explicitly specified as 0.0.0.0, we set it to be peer. |
2338 | * (whack will not allow nexthop to be elided in RW case.) |
2339 | */ |
2340 | update_ends_from_this_host_addr(&d->spd.this, &d->spd.that); |
2341 | d->spd.spd_next = NULL;
2342 | |
2343 | d->spd.reqid = c->sa_reqid == 0 ? gen_reqid() : c->sa_reqid; |
2344 | dbg("%s d->spd.reqid=%d because c->sa_reqid=%d",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("%s d->spd.reqid=%d because c->sa_reqid=%d" , d->name, d->spd.reqid, c->sa_reqid); } } |
2345 | d->name, d->spd.reqid, c->sa_reqid){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("%s d->spd.reqid=%d because c->sa_reqid=%d" , d->name, d->spd.reqid, c->sa_reqid); } }; |
2346 | |
2347 | /* since both ends updated; presumably already oriented? */ |
2348 | set_policy_prio(d); |
2349 | |
2350 | /* set internal fields */ |
2351 | d->spd.routing = RT_UNROUTED; |
2352 | d->newest_ike_sa = SOS_NOBODY;
2353 | d->newest_ipsec_sa = SOS_NOBODY;
2354 | d->spd.eroute_owner = SOS_NOBODY;
2355 | |
2356 | /* reset log file info */ |
2357 | d->log_file_name = NULL;
2358 | d->log_file = NULL;
2359 | d->log_file_err = false;
2360 | |
2361 | if (c->sa_marks.in.unique) { |
2362 | d->sa_marks.in.val = global_marks; |
2363 | d->sa_marks.out.val = global_marks; |
2364 | global_marks++; |
2365 | if (global_marks == UINT_MAX - 1) {
2366 | /* we hope 2^32 connections ago are no longer around */
2367 | global_marks = MINIMUM_IPSEC_SA_RANDOM_MARK;
2368 | } |
2369 | } |
2370 | |
2371 | /* assumption: orientation is the same as c's */ |
2372 | connect_to_host_pair(d); |
2373 | |
2374 | if (sec_label.len > 0) { |
2375 | /* |
2376 | * Install the sec_label from either an acquire or |
2377 | * child payload into both ends. |
2378 | */ |
2379 | FOR_EACH_THING(end, &d->spd.this, &d->spd.that) {
2380 | pexpect(end->sec_label.ptr == NULL);
2381 | end->sec_label = clone_hunk(sec_label, "instantiate() sec_label");
2382 | } |
2383 | } |
2384 | |
2385 | connection_buf cb, db; |
2386 | address_buf pab; |
2387 | id_buf pib; |
2388 | dbg("instantiated "PRI_CO" "PRI_CONNECTION" as "PRI_CO" "PRI_CONNECTION" using kind=%s remote_address=%s remote_id=%s sec_label="PRI_SHUNK,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2389 | pri_co(c->serialno), pri_connection(c, &cb),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2390 | pri_co(d->serialno), pri_connection(d, &db),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2391 | enum_name(&connection_kind_names, d->kind),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2392 | peer_addr != NULL ? str_address(peer_addr, &pab) : "N/A",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2393 | peer_id != NULL ? str_id(peer_id, &pib) : "N/A",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } } |
2394 | pri_shunk(d->spd.this.sec_label)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("instantiated ""$%u"" ""\"%s\"%s"" as ""$%u"" " "\"%s\"%s"" using kind=%s remote_address=%s remote_id=%s sec_label=" "%.*s", ((c->serialno)), (c)->name, str_connection_instance (c, &cb), ((d->serialno)), (d)->name, str_connection_instance (d, &db), enum_name(&connection_kind_names, d->kind ), peer_addr != ((void*)0) ? str_address(peer_addr, &pab) : "N/A", peer_id != ((void*)0) ? str_id_bytes(peer_id, jam_raw_bytes , &pib) : "N/A", ((int) (d->spd.this.sec_label).len), ( const char *) ((d->spd.this.sec_label).ptr)); } }; |
2395 | |
2396 | return d; |
2397 | } |
2398 | |
2399 | struct connection *rw_instantiate(struct connection *c, |
2400 | const ip_address *peer_addr, |
2401 | const ip_selector *peer_subnet, |
2402 | const struct id *peer_id) |
2403 | { |
2404 | struct connection *d = instantiate(c, peer_addr, peer_id, null_shunk); |
2405 | |
2406 | if (peer_subnet != NULL && is_virtual_connection(c)) {
2407 | d->spd.that.client = *peer_subnet;
2408 | if (selector_eq_address(*peer_subnet, *peer_addr))
2409 | d->spd.that.has_client = false;
2410 | } |
2411 | |
2412 | if (d->policy & POLICY_OPPORTUNISTIC) {
2413 | /* |
2414 | * This must be before we know the client addresses. |
2415 | * Fill in one that is impossible. This prevents anyone else |
2416 | * from trying to use this connection to get to a particular |
2417 | * client |
2418 | */ |
2419 | d->spd.that.client = selector_type(&d->spd.that.client)->selector.zero; |
2420 | } |
2421 | connection_buf inst; |
2422 | address_buf b; |
2423 | dbg("rw_instantiate() instantiated "PRI_CONNECTION" for %s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("rw_instantiate() instantiated ""\"%s\"%s"" for %s" , (d)->name, str_connection_instance(d, &inst), str_address (peer_addr, &b)); } } |
2424 | pri_connection(d, &inst),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("rw_instantiate() instantiated ""\"%s\"%s"" for %s" , (d)->name, str_connection_instance(d, &inst), str_address (peer_addr, &b)); } } |
2425 | str_address(peer_addr, &b)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("rw_instantiate() instantiated ""\"%s\"%s"" for %s" , (d)->name, str_connection_instance(d, &inst), str_address (peer_addr, &b)); } }; |
2426 | return d; |
2427 | } |
2428 | |
2429 | /* priority formatting */ |
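| /*
|  * A policy_prio_t packs the two client prefix lengths: bits 17 and up
|  * hold this.client.maskbits, bits 8..16 hold that.client.maskbits, and
|  * bit 0 is set so a computed priority never equals BOTTOM_PRIO (0).
|  * For example a /32 local client with a /24 remote client gives
|  * (32 << 17) | (24 << 8) | 1, which jam_policy_prio() prints as "32,24".
|  */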
2430 | size_t jam_policy_prio(struct jambuf *buf, policy_prio_t pp) |
2431 | { |
2432 | if (pp == BOTTOM_PRIO) {
2433 | return jam_string(buf, "0"); |
2434 | } |
2435 | |
2436 | return jam(buf, "%" PRIu32"u" ",%" PRIu32"u", |
2437 | pp >> 17, (pp & ~(~(policy_prio_t)0 << 17)) >> 8); |
2438 | } |
2439 | |
2440 | const char *str_policy_prio(policy_prio_t pp, policy_prio_buf *buf) |
2441 | { |
2442 | struct jambuf jb = ARRAY_AS_JAMBUF(buf->buf);
2443 | jam_policy_prio(&jb, pp); |
2444 | return buf->buf; |
2445 | } |
2446 | |
2447 | void set_policy_prio(struct connection *c) |
2448 | { |
2449 | c->policy_prio = (((policy_prio_t)c->spd.this.client.maskbits << 17) | |
2450 | ((policy_prio_t)c->spd.that.client.maskbits << 8) | |
2451 | ((policy_prio_t)1)); |
2452 | } |
2453 | |
2454 | /* |
2455 | * Format any information needed to identify an instance of a connection. |
2456 | * Fills any needed information into buf which MUST be big enough. |
2457 | * Road Warrior: peer's IP address |
2458 | * Opportunistic: [" " myclient "==="] " ..." peer ["===" peer_client] '\0' |
2459 | */ |
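| /*
|  * For example, a road warrior instance prints roughly as
|  * "west"[1] 192.1.2.45, and an opportunistic instance roughly as
|  * "private"[2] 10.0.1.0/24=== ...192.1.2.3===10.0.2.0/24 (a client
|  * equal to its host address is elided).
|  */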
2460 | |
2461 | static size_t jam_connection_client(struct jambuf *b, |
2462 | const char *prefix, const char *suffix, |
2463 | const ip_selector client, |
2464 | const ip_address host_addr) |
2465 | { |
2466 | size_t s = 0; |
2467 | if (selector_range_eq_address(client, host_addr)) { |
2468 | /* compact denotation for "self" */ |
2469 | } else { |
2470 | s += jam_string(b, prefix); |
2471 | if (selector_is_unset(&client)) { |
2472 | s += jam_string(b, "?"); |
2473 | } else if (selector_is_zero(client)) { |
2474 | s += jam_string(b, "?"); /* unknown */ |
2475 | } else { |
2476 | s += jam_selector_subnet(b, &client); |
2477 | } |
2478 | s += jam_string(b, suffix); |
2479 | } |
2480 | return s; |
2481 | } |
2482 | |
2483 | size_t jam_connection_instance(struct jambuf *buf, const struct connection *c) |
2484 | { |
2485 | if (!pexpect(c->kind == CK_INSTANCE ||
2486 | c->kind == CK_GOING_AWAY)) {
2487 | return 0; |
2488 | } |
2489 | size_t s = 0; |
2490 | if (c->instance_serial != 0) { |
2491 | s += jam(buf, "[%lu]", c->instance_serial); |
2492 | } |
2493 | if (c->policy & POLICY_OPPORTUNISTIC) {
2494 | s += jam_connection_client(buf, " ", "===", |
2495 | c->spd.this.client, |
2496 | c->spd.this.host_addr); |
2497 | s += jam_string(buf, " ..."); |
2498 | s += jam_address(buf, &c->spd.that.host_addr); |
2499 | s += jam_connection_client(buf, "===", "", |
2500 | c->spd.that.client, |
2501 | c->spd.that.host_addr); |
2502 | } else { |
2503 | s += jam_string(buf, " "); |
2504 | s += jam_address_sensitive(buf, &c->spd.that.host_addr); |
2505 | } |
2506 | return s; |
2507 | } |
2508 | |
2509 | size_t jam_connection(struct jambuf *buf, const struct connection *c) |
2510 | { |
2511 | size_t s = 0; |
2512 | s += jam(buf, "\"%s\"", c->name); |
2513 | if (c->kind == CK_INSTANCE || c->kind == CK_GOING_AWAY) { |
2514 | s += jam_connection_instance(buf, c); |
2515 | } |
2516 | return s; |
2517 | } |
2518 | |
2519 | const char *str_connection_instance(const struct connection *c, connection_buf *buf) |
2520 | { |
2521 | struct jambuf p = ARRAY_AS_JAMBUF(buf->buf);
2522 | if (c->kind == CK_INSTANCE) { |
2523 | jam_connection_instance(&p, c); |
2524 | } |
2525 | return buf->buf; |
2526 | } |
2527 | |
2528 | /* |
2529 | * Find an existing connection for a trapped outbound packet. |
2530 | * |
2531 | * This is attempted before we bother with gateway discovery. |
2532 | * + this connection is routed or instance_of_routed_template |
2533 | * (i.e. approved for on-demand) |
2534 | * + this subnet contains our_client (or we are our_client) |
2535 | * + that subnet contains peer_client (or peer is peer_client) |
2536 | * + don't care about Phase 1 IDs (we don't know) |
2537 | * Note: result may still need to be instantiated. |
2538 | * The winner has the highest policy priority. |
2539 | * |
2540 | * If there are several with that priority, we give preference to the |
2541 | * first one that is an instance. |
2542 | * |
2543 | * See also build_outgoing_opportunistic_connection. |
2544 | */ |
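| /*
|  * Note: when the best match turns out to be a never-negotiate
|  * connection it is discarded and NULL is returned.
|  */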
2545 | struct connection *find_connection_for_clients(struct spd_route **srp, |
2546 | const ip_endpoint *local_client, |
2547 | const ip_endpoint *remote_client, |
2548 | shunk_t sec_label, struct logger *logger) |
2549 | { |
2550 | passert(!endpoint_is_unset(local_client));
2551 | passert(!endpoint_is_unset(remote_client));
2552 | passert(endpoint_protocol(*local_client) == endpoint_protocol(*remote_client));
2553 | |
2554 | int local_port = endpoint_hport(*local_client); |
2555 | int remote_port = endpoint_hport(*remote_client); |
2556 | |
2557 | struct connection *best = NULL;
2558 | policy_prio_t best_prio = BOTTOM_PRIO;
2559 | struct spd_route *best_sr = NULL;
2560 | |
2561 | endpoints_buf eb; |
2562 | dbg("find_connection: looking for policy for connection: %s",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("find_connection: looking for policy for connection: %s" , str_endpoints(local_client, remote_client, &eb)); } } |
2563 | str_endpoints(local_client, remote_client, &eb)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("find_connection: looking for policy for connection: %s" , str_endpoints(local_client, remote_client, &eb)); } }; |
2564 | |
2565 | struct connection_query cq = { .where = HERE({ static const struct where here = { .func = __func__, .file = "programs/pluto/connections.c", .line = 2565, }; &here ; }), .c = NULL((void*)0), }; |
2566 | while (new2old_connection(&cq)) { |
2567 | struct connection *c = cq.c; |
2568 | |
2569 | if (c->kind == CK_GROUP) { |
2570 | continue; |
2571 | } |
2572 | |
2573 | /* |
2574 | * For both IKEv1 and IKEv2 labeled IPsec, don't try |
2575 | * to mix 'n' match acquire sec_label with |
2576 | * non-sec_label connection. |
2577 | */ |
2578 | if ((sec_label.len > 0) != (c->config->sec_label.len > 0)) { |
2579 | continue; |
2580 | } |
2581 | |
2582 | /* |
2583 | * For IKEv2 labeled IPsec, always start with the |
2584 | * template. Who are we to argue if the kernel asks |
2585 | * for a new SA with, seemingly, a security label that |
2586 | * matches an existing connection instance. |
2587 | */ |
2588 | if (c->ike_version == IKEv2 && |
2589 | c->config->sec_label.len > 0 && |
2590 | c->kind != CK_TEMPLATE) { |
2591 | pexpect(c->kind == CK_INSTANCE);
2592 | connection_buf cb;
2593 | dbg("skipping non-template IKEv2 "PRI_CONNECTION" with a security label",
2594 | pri_connection(c, &cb));
2595 | continue; |
2596 | } |
2597 | |
2598 | /* |
2599 | * When there is a sec_label, it needs to be within |
2600 | * the configuration's range. |
2601 | */ |
2602 | if (sec_label.len > 0 /*implies c->config->sec_label*/ && |
2603 | !sec_label_within_range(sec_label, c->config->sec_label, logger)) { |
2604 | continue; |
2605 | } |
2606 | |
2607 | struct spd_route *sr; |
2608 | |
2609 | for (sr = &c->spd; best != c && sr; sr = sr->spd_next) { |
2610 | |
2611 | /* |
2612 | * XXX: is the !sec_label an IKEv1 thing? An |
2613 | * IKEv2 sec-labeled connection should have |
2614 | * been routed by now? |
2615 | */ |
2616 | if (!routed(sr->routing) &&
2617 | !c->instance_initiation_ok && |
2618 | c->config->sec_label.len == 0) { |
2619 | continue; |
2620 | } |
2621 | |
2622 | /* |
2623 | * The triggering traffic needs to be within |
2624 | * the client. |
2625 | */ |
2626 | if (!endpoint_in_selector(*local_client, sr->this.client) || |
2627 | !endpoint_in_selector(*remote_client, sr->that.client)) { |
2628 | continue; |
2629 | } |
2630 | |
2631 | unsigned ipproto = endpoint_protocol(*local_client)->ipproto; |
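| /*
|  * Weight the match: the connection's policy_prio dominates (scaled
|  * by 8, with a +1 edge for instances), an exact source or
|  * destination port match adds 2 each, and a protocol match adds 1.
|  */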
2632 | policy_prio_t prio = |
2633 | (8 * (c->policy_prio + (c->kind == CK_INSTANCE)) + |
2634 | 2 * (sr->this.port == local_port) + |
2635 | 2 * (sr->that.port == remote_port) + |
2636 | 1 * (sr->this.protocol == ipproto)); |
2637 | |
2638 | if (DBGP(DBG_BASE)) {
2639 | connection_buf cib_c; |
2640 | selectors_buf sb; |
2641 | DBG_log("find_connection: conn "PRI_CONNECTION"\"%s\"%s"" has compatible peers: %s [pri: %" PRIu32"u" "]", |
2642 | pri_connection(c, &cib_c)(c)->name, str_connection_instance(c, &cib_c), |
2643 | str_selectors(&c->spd.this.client, &c->spd.that.client, &sb), |
2644 | prio); |
2645 | if (best == NULL) {
2646 | DBG_log("find_connection: first OK "PRI_CONNECTION" [pri:%" PRIu32 "]{%p} (child %s)",
2647 | pri_connection(c, &cib_c),
2648 | prio, c, |
2649 | c->policy_next ? |
2650 | c->policy_next->name : |
2651 | "none"); |
2652 | } else { |
2653 | connection_buf cib_best; |
2654 | DBG_log("find_connection: comparing best "PRI_CONNECTION"\"%s\"%s"" [pri:%" PRIu32"u" "]{%p} (child %s) to "PRI_CONNECTION"\"%s\"%s"" [pri:%" PRIu32"u" "]{%p} (child %s)", |
2655 | pri_connection(best, &cib_best)(best)->name, str_connection_instance(best, &cib_best), |
2656 | best_prio, |
2657 | best, |
2658 | best->policy_next ? |
2659 | best->policy_next->name : |
2660 | "none", |
2661 | pri_connection(c, &cib_c),
2662 | prio, c, |
2663 | c->policy_next ? |
2664 | c->policy_next->name : |
2665 | "none"); |
2666 | } |
2667 | } |
2668 | |
2669 | if (best == NULL || prio > best_prio) {
2670 | best = c; |
2671 | best_sr = sr; |
2672 | best_prio = prio; |
2673 | } |
2674 | } |
2675 | } |
2676 | |
2677 | if (best != NULL && NEVER_NEGOTIATE(best->policy))
2678 | best = NULL;
2679 |
2680 | if (srp != NULL && best != NULL)
2681 | *srp = best_sr; |
2682 | |
2683 | if (DBGP(DBG_BASE)) {
2684 | if (best != NULL) {
2685 | connection_buf cib;
2686 | DBG_log("find_connection: concluding with "PRI_CONNECTION" [pri:%" PRIu32 "]{%p} kind=%s",
2687 | pri_connection(best, &cib),
2688 | best_prio, best, |
2689 | enum_name(&connection_kind_names, best->kind)); |
2690 | } else { |
2691 | DBG_log("find_connection: concluding with empty"); |
2692 | } |
2693 | } |
2694 | |
2695 | return best; |
2696 | } |
2697 | |
2698 | struct connection *oppo_instantiate(struct connection *c, |
2699 | const struct id *remote_id, |
2700 | /* both host and client */ |
2701 | const ip_address *local_address, |
2702 | const ip_address *remote_address) |
2703 | { |
2704 | passert(local_address != NULL);
2705 | passert(remote_address != NULL);
2706 | address_buf lb, rb;
2707 | dbg("oppo instantiating c=\"%s\" with c->routing %s between %s -> %s",
2708 | c->name, enum_name(&routing_story, c->spd.routing),
2709 | str_address(local_address, &lb), str_address(remote_address, &rb));
2710 | |
2711 | struct connection *d = instantiate(c, remote_address, remote_id, null_shunk); |
2712 | |
2713 | passert(d->spd.spd_next == NULL);
2714 | |
2715 | /* fill in our client side */ |
2716 | if (d->spd.this.has_client) { |
2717 | /* |
2718 | * There was a client in the abstract connection so we |
2719 | * demand that either ... |
2720 | */ |
2721 | |
2722 | /* opportunistic connections do not use port selectors */ |
2723 | if (address_in_selector_range(*local_address, d->spd.this.client)) { |
2724 | /* |
2725 | * the required client is within that subnet |
2726 | * narrow it(?), ... |
2727 | */ |
2728 | d->spd.this.client = selector_from_address(*local_address); |
2729 | } else if (address_eq_address(*local_address, d->spd.this.host_addr)) { |
2730 | /* |
2731 | * or that it is our private ip in case we are |
2732 | * behind a port forward. |
2733 | */ |
2734 | update_selector_hport(&d->spd.this.client, 0);
2735 | } else {
2736 | passert_fail(c->logger, HERE,
2737 | "local address does not match the host or client"); |
2738 | } |
2739 | } else { |
2740 | /* |
2741 | * There was no client in the abstract connection so |
2742 | * we demand that the required client be the host. |
2743 | */ |
2744 | passert(address_eq_address(*local_address, d->spd.this.host_addr));
2745 | } |
2746 | |
2747 | /* |
2748 | * Fill in peer's client side. |
2749 | * If the client is the peer, excise the client from the connection. |
2750 | */ |
2751 | passert(d->policy & POLICY_OPPORTUNISTIC);
2752 | passert(address_in_selector_range(*remote_address, d->spd.that.client));
2753 | d->spd.that.client = selector_from_address(*remote_address); |
2754 | |
2755 | if (address_eq_address(*remote_address, d->spd.that.host_addr)) |
2756 | d->spd.that.has_client = false;
2757 | |
2758 | /* |
2759 | * Adjust routing if something is eclipsing c. |
2760 | * It must be a %hold for us (hard to passert this). |
2761 | * If there was another instance eclipsing, we'd be using it. |
2762 | */ |
2763 | if (c->spd.routing == RT_ROUTED_ECLIPSED) |
2764 | d->spd.routing = RT_ROUTED_PROSPECTIVE; |
2765 | |
2766 | /* |
2767 | * Remember if the template is routed: |
2768 | * if so, this instance applies for initiation |
2769 | * even if it is created for responding. |
2770 | */ |
2771 | if (routed(c->spd.routing))
2772 | d->instance_initiation_ok = true;
2773 | |
2774 | if (DBGP(DBG_BASE)) {
2775 | char topo[CONN_BUF_LEN];
2776 | connection_buf inst;
2777 | DBG_log("oppo_instantiate() instantiated "PRI_CONNECTION" with routing %s: %s",
2778 | pri_connection(d, &inst),
2779 | enum_name(&routing_story, d->spd.routing), |
2780 | format_connection(topo, sizeof(topo), d, &d->spd)); |
2781 | } |
2782 | return d; |
2783 | } |
2784 | |
2785 | /* |
2786 | * Outgoing opportunistic connection. |
2787 | * |
2788 | * Find and instantiate a connection for an outgoing Opportunistic connection. |
2789 | * We've already discovered its gateway. |
2790 | * We look for a connection such that: |
2791 | * + this is one of our interfaces |
2792 | * + this subnet contains our_client (or we are our_client) |
2793 | * (we will specialize the client). We prefer the smallest such subnet. |
2794 | * + that subnet contains peer_client (we will specialize the client).
2795 | * We prefer the smallest such subnet. |
2796 | * + is opportunistic |
2797 | * + that peer is NO_IP |
2798 | * + don't care about Phase 1 IDs (probably should be default) |
2799 | * We could look for a connection that already had the desired peer |
2800 | * (rather than NO_IP) specified, but it doesn't seem worth the |
2801 | * bother. |
2802 | * |
2803 | * We look for the routed policy applying to the narrowest subnets. |
2804 | * We only succeed if we find such a policy AND it is satisfactory. |
2805 | * |
2806 | * The body of the inner loop is a lot like that in |
2807 | * find_connection_for_clients. In this case, we know the gateways |
2808 | * that we need to instantiate an opportunistic connection. |
2809 | */ |
2810 | struct connection *build_outgoing_opportunistic_connection(const ip_endpoint *local_client, |
2811 | const ip_endpoint *remote_client) |
2812 | { |
2813 | /* |
2814 | * Did the caller do their job? |
2815 | * |
2816 | * Where the protocol includes a port, the endpoint ports |
2817 | * don't need to match but they do need to be defined. |
2818 | * |
2819 | * Unfortunately rcv_whack.c calls this function with a very |
2820 | * flimsy looking local/remote endpoints - both the protocol |
2821 | * and the port are missing. Hence some of the fuzzy checks |
2822 | * below. |
2823 | */ |
2824 | pendpoint(local_client);
2825 | pendpoint(remote_client);
2826 | pexpect(endpoint_protocol(*local_client) == endpoint_protocol(*remote_client));
2827 | |
2828 | /* |
2829 | * Go through all the "half" oriented connections (remote |
2830 | * address is unset) looking for client that matches the |
2831 | * local/remote endpoint. |
2832 | * |
2833 | * Unfortunately there's no good data structure for doing |
2834 | * this, so ... |
2835 | * |
2836 | * Big hack: get the list of local addresses by iterating over |
2837 | * the interface endpoints, and then feed the endpoint's |
2838 | * address into FOR_EACH_HOST_PAIR_CONNECTION(LOCAL,UNSET). |
2839 | */ |
2840 | struct connection *best = NULL;
2841 | struct spd_route *best_spd_route = NULL;
2842 | struct iface_dev *last_iface_device = NULL;
2843 | for (struct iface_endpoint *p = interfaces; p != NULL; p = p->next) {
2844 | /* |
2845 | * Bigger hack: assume the interface endpoints |
2846 | * (ADDRESS:500 ADDRESS:4500) for a device are grouped |
2847 | * (mostly true, TCP, custom port?) and only search |
2848 | * when a new interface device is found. |
2849 | */ |
2850 | if (p->ip_dev == last_iface_device) { |
2851 | continue; |
2852 | } |
2853 | last_iface_device = p->ip_dev; |
2854 | /* |
2855 | * Go through those connections with our address and |
2856 | * NO_IP as hosts. |
2857 | * |
2858 | * We cannot know what port the peer would use, so we |
2859 | * assume that it is pluto_port (makes debugging |
2860 | * easier). |
2861 | * |
2862 | * XXX: the port doesn't matter! |
2863 | */ |
2864 | FOR_EACH_HOST_PAIR_CONNECTION(p->ip_dev->id_address, unset_address, c) {
2865 |
2866 | dbg("checking %s", c->name);
2867 | if (c->kind == CK_GROUP) |
2868 | continue; |
2869 | |
2870 | /* |
2871 | * for each sr of c, see if we have a new best |
2872 | * |
2873 | * Paul: while this code can reject unmatched |
2874 | * conns, it does not find the most narrow |
2875 | * match! |
2876 | */ |
2877 | for (struct spd_route *sr = &c->spd; sr != NULL; sr = sr->spd_next) {
2878 | if (!routed(sr->routing)) {
2879 | continue; |
2880 | } |
2881 | if (!endpoint_in_selector(*local_client, sr->this.client)) { |
2882 | continue; |
2883 | } |
2884 | if (!endpoint_in_selector(*remote_client, sr->that.client)) { |
2885 | continue; |
2886 | } |
2887 | |
2888 | /* |
2889 | * First or better solution. |
2890 | * |
2891 | * The test for better is: |
2892 | * sr's .this is narrower, or |
2893 | * sr's .this is same and sr's .that is narrower. |
2894 | * ??? not elegant, not symmetric. |
2895 | * Possible replacement test: |
2896 | * best_spd_route->this.client.maskbits + best_spd_route->that.client.maskbits > |
2897 | * sr->this.client.maskbits + sr->that.client.maskbits |
2898 | * but this knows too much about the representation of ip_subnet. |
2899 | * What is the correct semantics? |
2900 | * |
2901 | * XXX: selector_in_selector() is |
2902 | * exclusive - it excludes |
2903 | * selector_eq(). |
2904 | */ |
2905 | |
2906 | if (best_spd_route != NULL &&
2907 | selector_in_selector(best_spd_route->this.client, sr->this.client)) { |
2908 | /* |
2909 | * BEST_SPD_ROUTE is better. |
2910 | * |
2911 | * BEST_SPD_ROUTE's .this is |
2912 | * narrower than .SR's. |
2913 | */ |
2914 | continue; |
2915 | } |
2916 | if (best_spd_route != NULL &&
2917 | selector_eq_selector(best_spd_route->this.client, sr->this.client) && |
2918 | selector_in_selector(best_spd_route->that.client, sr->that.client)) { |
2919 | /* |
2920 | * BEST_SPD_ROUTE is better. |
2921 | * |
2922 | * Since BEST_SPD_ROUTE's |
2923 | * .this matches SR's, |
2924 | * tie-break with |
2925 | * BEST_SPD_ROUTE's .that |
2926 | * being narrower than .SR's. |
2927 | */ |
2928 | continue; |
2929 | } |
2930 | best = c; |
2931 | best_spd_route = sr; |
2932 | } |
2933 | } |
2934 | } |
2935 | |
2936 | if (best == NULL ||
2937 | NEVER_NEGOTIATE(best->policy) ||
2938 | (best->policy & POLICY_OPPORTUNISTIC) == LEMPTY ||
2939 | best->kind != CK_TEMPLATE) {
2940 | return NULL;
2941 | } |
2942 | |
2943 | /* XXX we might not yet know the ID! */ |
2944 | ip_address local_address = endpoint_address(*local_client); |
2945 | ip_address remote_address = endpoint_address(*remote_client); |
2946 | return oppo_instantiate(best, NULL, &local_address, &remote_address);
2947 | } |
2948 | |
2949 | /* |
2950 | * Find the connection to connection c's peer's client with the |
2951 | * largest value of .routing. All other things being equal, |
2952 | * preference is given to c. If none is routed, return NULL. |
2953 | * |
2954 | * If erop is non-null, set *erop to a connection sharing both |
2955 | * our client subnet and peer's client subnet with the largest value |
2956 | * of .routing. If none is erouted, set *erop to NULL. |
2957 | * |
2958 | * The return value is used to find other connections sharing a route. |
2959 | * *erop is used to find other connections sharing an eroute. |
2960 | */ |
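| /*
|  * Connections whose SA marks differ from c's (after masking) are
|  * never considered to share a route or eroute here.
|  */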
2961 | struct connection *route_owner(struct connection *c, |
2962 | const struct spd_route *cur_spd, |
2963 | struct spd_route **srp, |
2964 | struct connection **erop, |
2965 | struct spd_route **esrp) |
2966 | { |
2967 | if (!oriented(c)) { |
2968 | llog(RC_LOG, c->logger, |
2969 | "route_owner: connection no longer oriented - system interface change?"); |
2970 | return NULL;
2971 | } |
2972 | |
2973 | struct connection |
2974 | *best_ro = c, |
2975 | *best_ero = c; |
2976 | struct spd_route *best_sr = NULL,
2977 | *best_esr = NULL;
2978 | enum routing_t best_routing = cur_spd->routing, |
2979 | best_erouting = best_routing; |
2980 | |
2981 | |
2982 | struct connection_query cq = { .where = HERE, .c = NULL, };
2983 | while (new2old_connection(&cq)) { |
2984 | struct connection *d = cq.c; |
2985 | |
2986 | if (!oriented(d)) |
2987 | continue; |
2988 | |
2989 | /* |
2990 | * consider policies different if the either in or out marks |
2991 | * differ (after masking) |
2992 | */ |
2993 | if (DBGP(DBG_BASE)) {
2994 | DBG_log(" conn %s mark %" PRIu32 "/%#08" PRIx32 ", %" PRIu32 "/%#08" PRIx32 " vs",
2995 | c->name, c->sa_marks.in.val, c->sa_marks.in.mask, |
2996 | c->sa_marks.out.val, c->sa_marks.out.mask); |
2997 | DBG_log(" conn %s mark %" PRIu32"u" "/%#08" PRIx32"x" ", %" PRIu32"u" "/%#08" PRIx32"x", |
2998 | d->name, d->sa_marks.in.val, d->sa_marks.in.mask, |
2999 | d->sa_marks.out.val, d->sa_marks.out.mask); |
3000 | } |
3001 | |
3002 | if ( (c->sa_marks.in.val & c->sa_marks.in.mask) != (d->sa_marks.in.val & d->sa_marks.in.mask) || |
3003 | (c->sa_marks.out.val & c->sa_marks.out.mask) != (d->sa_marks.out.val & d->sa_marks.out.mask) ) |
3004 | continue; |
3005 | |
3006 | struct spd_route *srd; |
3007 | |
3008 | for (srd = &d->spd; srd != NULL; srd = srd->spd_next) {
3009 | if (srd->routing == RT_UNROUTED) |
3010 | continue; |
3011 | |
3012 | const struct spd_route *src; |
3013 | |
3014 | for (src = &c->spd; src != NULL; src = src->spd_next) {
3015 | if (src == srd) |
3016 | continue; |
3017 | |
3018 | if (!selector_range_eq_selector_range(src->that.client, srd->that.client) || |
3019 | src->that.protocol != srd->that.protocol || |
3020 | src->that.port != srd->that.port || |
3021 | !sameaddr(&src->this.host_addr, |
3022 | &srd->this.host_addr)) |
3023 | continue; |
3024 | |
3025 | if (srd->routing > best_routing) { |
3026 | best_ro = d; |
3027 | best_sr = srd; |
3028 | best_routing = srd->routing; |
3029 | } |
3030 | |
3031 | if (selector_range_eq_selector_range(src->this.client, srd->this.client) && |
3032 | src->this.protocol == srd->this.protocol && |
3033 | src->this.port == srd->this.port && |
3034 | srd->routing > best_erouting) |
3035 | { |
3036 | best_ero = d; |
3037 | best_esr = srd; |
3038 | best_erouting = srd->routing; |
3039 | } |
3040 | } |
3041 | } |
3042 | } |
3043 | |
3044 | LSWDBGP(DBG_BASE, buf) {
3045 | connection_buf cib;
3046 | jam(buf, "route owner of \"%s\"%s %s: ",
3047 | pri_connection(c, &cib),
3048 | enum_name(&routing_story, cur_spd->routing));
3049 | 
3050 | if (!routed(best_routing)) {
3051 | jam(buf, "NULL");
3052 | } else if (best_ro == c) {
3053 | jam(buf, "self");
3054 | } else {
3055 | connection_buf cib;
3056 | jam(buf, ""PRI_CONNECTION" %s",
3057 | pri_connection(best_ro, &cib),
3058 | enum_name(&routing_story, best_routing));
3059 | }
3060 | 
3061 | if (erop != NULL) {
3062 | jam(buf, "; eroute owner: ");
3063 | if (!erouted(best_ero->spd.routing)) {
3064 | jam(buf, "NULL");
3065 | } else if (best_ero == c) {
3066 | jam(buf, "self");
3067 | } else {
3068 | connection_buf cib;
3069 | jam(buf, ""PRI_CONNECTION" %s",
3070 | pri_connection(best_ero, &cib),
3071 | enum_name(&routing_story, best_ero->spd.routing));
3072 | }
3073 | }
3074 | }
3075 | 
3076 | if (erop != NULL)
3077 | *erop = erouted(best_erouting) ? best_ero : NULL;
3078 | 
3079 | if (srp != NULL ) {
3080 | *srp = best_sr;
3081 | if (esrp != NULL )
3082 | *esrp = best_esr;
3083 | }
3084 | 
3085 | return routed(best_routing) ? best_ro : NULL;
3086 | } |
3087 | |
3088 | /* |
3089 | * Extracts the peer's ca from the chained list of public keys. |
3090 | */ |
3091 | static chunk_t get_peer_ca(struct pubkey_list *const *pubkey_db, |
3092 | const struct id *peer_id) |
3093 | { |
3094 | struct pubkey_list *p; |
3095 | |
3096 | for (p = *pubkey_db; p != NULL((void*)0); p = p->next) { |
3097 | struct pubkey *key = p->key; |
3098 | if (key->type == &pubkey_type_rsa && same_id(peer_id, &key->id)) |
3099 | return key->issuer; |
3100 | } |
3101 | return EMPTY_CHUNK;
3102 | } |
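/*
 * Editor's illustrative sketch (not part of the original source): in
 * practice get_peer_ca() is paired with trusted_ca_nss(), much as
 * refine_host_connection_on_responder() does below.  The helper name is
 * invented for illustration; the calls mirror the usage later in this file.
 */
static bool sketch_peer_ca_trusted_by(const struct state *st,
				      const struct id *peer_id,
				      const struct connection *d)
{
	int pathlen;
	chunk_t peer_ca = get_peer_ca(&st->st_remote_certs.pubkey_db, peer_id);

	if (hunk_isempty(peer_ca)) {
		/* fall back to the global pubkey DB, as the real code does */
		peer_ca = get_peer_ca(&pluto_pubkeys, peer_id);
	}

	/* does D's required CA anchor the peer's issuer (and how deep)? */
	return trusted_ca_nss(peer_ca, d->spd.that.ca, &pathlen);
}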
3103 | |
3104 | /* |
3105 | * ??? NOTE: THESE IMPORTANT COMMENTS DO NOT REFLECT ANY CHANGES MADE AFTER FreeS/WAN. |
3106 | * |
3107 | * Given an up-until-now satisfactory connection, find the best connection |
3108 | * now that we just got the Phase 1 Id Payload from the peer. |
3109 | * |
3110 | * Comments in the code describe the (tricky!) matching criteria. |
3111 | * |
3112 | * In RFC 2409 "The Internet Key Exchange (IKE)", |
3113 | * in 5.1 "IKE Phase 1 Authenticated With Signatures", describing Main |
3114 | * Mode: |
3115 | * |
3116 | * Initiator Responder |
3117 | * ----------- ----------- |
3118 | * HDR, SA --> |
3119 | * <-- HDR, SA |
3120 | * HDR, KE, Ni --> |
3121 | * <-- HDR, KE, Nr |
3122 | * HDR*, IDii, [ CERT, ] SIG_I --> |
3123 | * <-- HDR*, IDir, [ CERT, ] SIG_R |
3124 | * |
3125 | * In 5.4 "Phase 1 Authenticated With a Pre-Shared Key": |
3126 | * |
3127 | * HDR, SA --> |
3128 | * <-- HDR, SA |
3129 | * HDR, KE, Ni --> |
3130 | * <-- HDR, KE, Nr |
3131 | * HDR*, IDii, HASH_I --> |
3132 | * <-- HDR*, IDir, HASH_R |
3133 | * |
3134 | * - the Responder receives the IDii payload: |
3135 | * + [PSK] after using PSK to decode this message |
3136 | * + before sending its IDir payload |
3137 | * + before using its ID in HASH_R computation |
3138 | * + [DSig] before using its private key to sign SIG_R |
3139 | * + before using the Initiator's ID in HASH_I calculation |
3140 | * + [DSig] before using the Initiator's public key to check SIG_I |
3141 | * |
3142 | * refine_host_connection can choose a different connection, as long as |
3143 | * nothing already used is changed. |
3144 | */ |
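/*
 * Editor's illustrative sketch (not part of the original source): how an
 * IKEv2 responder might invoke the refinement described above.  For IKEv2
 * the IKEv1 auth_policy argument is LEMPTY and only this_authby is used;
 * what the caller then does with the returned connection (switching
 * st->st_connection) is outside this sketch.  The helper name and its
 * placement are invented for illustration and assume the function's
 * declaration from the header is in scope.
 */
static void sketch_refine_for_ikev2_responder(struct state *st,
					      const struct id *peer_id,
					      const struct id *tarzan_id,
					      enum keyword_authby authby)
{
	bool fromcert = false;
	struct connection *r =
		refine_host_connection_on_responder(st, peer_id, tarzan_id,
						    LEMPTY /* IKEv1 only */,
						    authby, &fromcert);
	if (r == NULL) {
		dbg("no connection is better than the current one");
		return;
	}
	dbg("refined to \"%s\"%s", r->name,
	    fromcert ? " (peer ID taken from cert)" : "");
}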
3145 | struct connection *refine_host_connection_on_responder(const struct state *st, |
3146 | const struct id *peer_id, |
3147 | const struct id *tarzan_id, |
3148 | lset_t auth_policy /* used by ikev1 */, |
3149 | enum keyword_authby this_authby /* used by ikev2 */, |
3150 | bool *fromcert)
3151 | { |
3152 | struct connection *c = st->st_connection; |
3153 | const generalName_t *requested_ca = st->st_requested_ca; |
3154 | /* Ensure the caller and we know the IKE version we are looking for */ |
3155 | bool ikev1 = auth_policy != LEMPTY;
3156 | bool ikev2 = this_authby != AUTHBY_UNSET;
3157 | |
3158 | *fromcert = false;
3159 | |
3160 | passert(ikev1 != ikev2 && ikev2 == (st->st_ike_version == IKEv2));
3161 | passert(this_authby != AUTHBY_NEVER);
3162 | |
3163 | /* |
3164 | * Translate the IKEv1 policy onto an IKEv2 policy. |
3165 | * Saves duplicating the checks for v1 and v2, and the |
3166 | * v1 policy is a subset of the v2 policy. Use the ikev2 |
3167 | * bool for IKEv2-only feature checks. |
3168 | */ |
3169 | if (ikev1) { |
3170 | /* ??? are these cases mutually exclusive? */ |
3171 | if (LIN(POLICY_RSASIG, auth_policy))
3172 | this_authby = AUTHBY_RSASIG;
3173 | if (LIN(POLICY_PSK, auth_policy))
3174 | this_authby = AUTHBY_PSK;
3175 | passert(this_authby != AUTHBY_UNSET);
3176 | } |
3177 | /* from here on, auth_policy must only be used to check POLICY_AGGRESSIVE */ |
3178 | |
3179 | connection_buf cib; |
3180 | dbg("refine_host_connection for %s: starting with "PRI_CONNECTION"",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("refine_host_connection for %s: starting with " "\"%s\"%s""", enum_name(&ike_version_names, st->st_connection ->ike_version), (c)->name, str_connection_instance(c, & cib)); } } |
3181 | enum_name(&ike_version_names, st->st_ike_version),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("refine_host_connection for %s: starting with " "\"%s\"%s""", enum_name(&ike_version_names, st->st_connection ->ike_version), (c)->name, str_connection_instance(c, & cib)); } } |
3182 | pri_connection(c, &cib)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("refine_host_connection for %s: starting with " "\"%s\"%s""", enum_name(&ike_version_names, st->st_connection ->ike_version), (c)->name, str_connection_instance(c, & cib)); } }; |
3183 | |
3184 | /* |
3185 | * Find the PEER's CA, check the per-state DB first. |
3186 | */ |
3187 | pexpect(st->st_remote_certs.processed);
3188 | chunk_t peer_ca = get_peer_ca(&st->st_remote_certs.pubkey_db, peer_id); |
3189 | |
3190 | if (hunk_isempty(peer_ca)) {
3191 | peer_ca = get_peer_ca(&pluto_pubkeys, peer_id); |
3192 | } |
3193 | |
3194 | { |
3195 | int opl; |
3196 | int ppl; |
3197 | |
3198 | if (same_id(&c->spd.that.id, peer_id) && |
3199 | peer_ca.ptr != NULL((void*)0) && |
3200 | trusted_ca_nss(peer_ca, c->spd.that.ca, &ppl) && |
3201 | ppl == 0 && |
3202 | match_requested_ca(requested_ca, c->spd.this.ca, &opl) && |
3203 | opl == 0) { |
3204 | |
3205 | connection_buf cib; |
3206 | dbg("refine_host_connection: happy with starting point: "PRI_CONNECTION"",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("refine_host_connection: happy with starting point: " "\"%s\"%s""", (c)->name, str_connection_instance(c, &cib )); } } |
3207 | pri_connection(c, &cib)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log("refine_host_connection: happy with starting point: " "\"%s\"%s""", (c)->name, str_connection_instance(c, &cib )); } }; |
3208 | |
3209 | /* |
3210 | * Peer ID matches current connection -- check |
3211 | * for "you Tarzan, me Jane" (remember this is |
3212 | * the responder). |
3213 | */ |
3214 | if (tarzan_id != NULL) {
3215 | /* ??? pexpect(c->spd.spd_next == NULL); */
3216 | if (idr_wildmatch(&c->spd.this, tarzan_id, st->st_logger)) {
3217 | dbg("the remote specified our ID in its IDr payload");
3218 | return c;
3219 | } else {
3220 | dbg("the remote specified an IDr that is not our ID for this connection");
3221 | }
3222 | } else {
3223 | dbg("the remote did not specify an IDr and our current connection is good enough");
3224 | return c;
3225 | }
3226 | } |
3227 | } |
3228 | |
3229 | /* |
3230 | * The current connection won't do: search for one that will. |
3231 | * First search for one with the same pair of hosts. |
3232 | * If that fails, search for a suitable Road Warrior or Opportunistic |
3233 | * connection (i.e. wildcard peer IP). |
3234 | * We need to match: |
3235 | * - peer_id (slightly complicated by instantiation) |
3236 | * - if PSK auth, the key must not change (we used it to decode message) |
3237 | * - policy-as-used must be acceptable to new connection |
3238 | * - if initiator, also: |
3239 | * + our ID must not change (we sent it in previous message) |
3240 | * + our RSA key must not change (we used it in the previous message)
3241 | */ |
3242 | passert(c != NULL);
3243 | |
3244 | int best_our_pathlen = 0; |
3245 | int best_peer_pathlen = 0; |
3246 | struct connection *best_found = NULL((void*)0); |
3247 | int best_wildcards = 0; |
3248 | |
3249 | /* wcpip stands for: wildcard Peer IP? */ |
3250 | for (unsigned wcpip = 1; wcpip <= 2; wcpip++) { |
3251 | /* |
3252 | * When starting second time around we're willing to |
3253 | * settle for a connection that needs Peer IP |
3254 | * instantiated: Road Warrior or Opportunistic. Look |
3255 | * on list of connections for host pair with wildcard |
3256 | * Peer IP. |
3257 | */ |
3258 | ip_address remote = wcpip == 2 ? unset_address : endpoint_address(st->st_remote_endpoint); |
3259 | FOR_EACH_HOST_PAIR_CONNECTION(c->interface->ip_dev->id_address, remote, d) {
3260 | |
3261 | int wildcards; |
3262 | bool matching_peer_id = (c->connalias != NULL && d->connalias != NULL &&
3263 | streq(c->connalias, d->connalias)) ||
3264 | match_id(peer_id, |
3265 | &d->spd.that.id, |
3266 | &wildcards); |
3267 | |
3268 | int peer_pathlen; |
3269 | bool matching_peer_ca = trusted_ca_nss(peer_ca,
3270 | d->spd.that.ca, |
3271 | &peer_pathlen); |
3272 | |
3273 | int our_pathlen; |
3274 | bool matching_requested_ca = match_requested_ca(requested_ca,
3275 | d->spd.this.ca, |
3276 | &our_pathlen); |
3277 | |
3278 | if (DBGP(DBG_BASE)) {
3279 | connection_buf b1, b2;
3280 | DBG_log(" refine_host_connection: checking "PRI_CONNECTION" against "PRI_CONNECTION", best=%s with match=%d(id=%d(%d)/ca=%d(%d)/reqca=%d(%d))",
3281 | pri_connection(c, &b1), pri_connection(d, &b2),
3282 | best_found != NULL ? best_found->name : "(none)",
3283 | matching_peer_id && matching_peer_ca && matching_requested_ca, |
3284 | matching_peer_id, wildcards, |
3285 | matching_peer_ca, peer_pathlen, |
3286 | matching_requested_ca, our_pathlen); |
3287 | DBG_log(" warning: not switching back to template of current instance"); |
3288 | } |
3289 | |
3290 | /* |
3291 | * 'You Tarzan, me Jane' check based on |
3292 | * received IDr (remember, this is the |
3293 | * responder). |
3294 | */ |
3295 | if (tarzan_id != NULL) {
3296 | id_buf tzb;
3297 | esb_buf tzesb;
3298 | dbg(" peer expects us to be %s (%s) according to its IDr payload",
3299 | str_id(tarzan_id, &tzb),
3300 | enum_show(&ike_id_type_names, tarzan_id->kind, &tzesb));
3301 | id_buf usb;
3302 | esb_buf usesb;
3303 | dbg(" this connection's local id is %s (%s)",
3304 | str_id(&d->spd.this.id, &usb),
3305 | enum_show(&ike_id_type_names, d->spd.this.id.kind, &usesb));
3306 | /* ??? pexpect(d->spd.spd_next == NULL); */
3307 | if (!idr_wildmatch(&d->spd.this, tarzan_id, st->st_logger)) {
3308 | dbg(" skipping because peer IDr payload does not match our expected ID");
3309 | continue;
3310 | }
3311 | } else {
3312 | dbg(" no IDr payload received from peer");
3313 | }
3314 | |
3315 | /* only consider sec_label+template */ |
3316 | if (d->config->sec_label.len > 0 && d->kind != CK_TEMPLATE) { |
3317 | dbg(" skipping because sec_label requires template"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because sec_label requires template" ); } }; |
3318 | continue; |
3319 | } |
3320 | |
3321 | /* ignore group connections */ |
3322 | if (d->policy & POLICY_GROUP) {
3323 | dbg(" skipping because group connection");
3324 | continue; |
3325 | } |
3326 | |
3327 | /* matching_peer_ca and matching_requested_ca are required */ |
3328 | if (!matching_peer_ca || !matching_requested_ca) { |
3329 | dbg(" skipping because !matching_peer_ca || !matching_requested_ca"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because !matching_peer_ca || !matching_requested_ca" ); } }; |
3330 | continue; |
3331 | } |
3332 | /* |
3333 | * Check if peer_id matches, exactly or after |
3334 | * instantiation. |
3335 | * Check for the match but also check to see if it's |
3336 | * the %fromcert + peer id match result. - matt |
3337 | */ |
3338 | bool d_fromcert = false;
3339 | if (!matching_peer_id) { |
3340 | d_fromcert = d->spd.that.id.kind == ID_FROMCERT; |
3341 | if (!d_fromcert) { |
3342 | dbg(" skipping because peer_id does not match"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because peer_id does not match") ; } }; |
3343 | continue; |
3344 | } |
3345 | } |
3346 | |
3347 | if (d->ike_version != st->st_ike_version) {
3348 | /* IKE version has to match */
3349 | dbg(" skipping because mismatching IKE version");
3350 | continue; |
3351 | } |
3352 | |
3353 | /* |
3354 | * Authentication used must fit policy of this |
3355 | * connection. |
3356 | */ |
3357 | if (ikev1) { |
3358 | if ((auth_policy ^ d->policy) & POLICY_AGGRESSIVE) {
3359 | dbg(" skipping because AGGRESSIVE isn't right");
3360 | continue; /* differ about aggressive mode */
3361 | }
3362 | 
3363 | if ((d->policy & auth_policy & ~POLICY_AGGRESSIVE) == LEMPTY) {
3364 | /* Our auth isn't OK for this connection. */
3365 | dbg(" skipping because AUTH isn't right");
3366 | continue; |
3367 | } |
3368 | } else { |
3369 | /* |
3370 | * We need to check whether leftauth and rightauth match, but we only
3371 | * know what the remote end will send in its IKE_AUTH message.
3372 | * Note that with IKEv2 we are guaranteed to be a RESPONDER;
3373 | * this_authby is the AUTH payload type received in IKE_AUTH.
3374 | * This also means we have already sent out our AUTH payload, so we
3375 | * cannot switch away from the previously used this.authby.
3376 | */ |
3377 | if (this_authby != d->spd.that.authby) { |
3378 | dbg(" skipping because mismatched authby"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because mismatched authby"); } }; |
3379 | continue; |
3380 | } |
3381 | } |
3382 | |
3383 | if (d->spd.this.xauth_server != c->spd.this.xauth_server) { |
3384 | /* Disallow IKEv2 CP or IKEv1 XAUTH mismatch */ |
3385 | dbg(" skipping because mismatched xauthserver"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because mismatched xauthserver") ; } }; |
3386 | continue; |
3387 | } |
3388 | |
3389 | if (d->spd.this.xauth_client != c->spd.this.xauth_client) { |
3390 | /* Disallow IKEv2 CP or IKEv1 XAUTH mismatch */ |
3391 | dbg(" skipping because mismatched xauthclient"){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" skipping because mismatched xauthclient") ; } }; |
3392 | continue; |
3393 | } |
3394 | |
3395 | connection_buf b1, b2; |
3396 | dbg(" refine_host_connection: checked "PRI_CONNECTION" against "PRI_CONNECTION", now for see if best",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: checked ""\"%s\"%s" " against ""\"%s\"%s"", now for see if best", (c)->name, str_connection_instance (c, &b1), (d)->name, str_connection_instance(d, &b2 )); } } |
3397 | pri_connection(c, &b1), pri_connection(d, &b2)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: checked ""\"%s\"%s" " against ""\"%s\"%s"", now for see if best", (c)->name, str_connection_instance (c, &b1), (d)->name, str_connection_instance(d, &b2 )); } }; |
3398 | |
3399 | if (this_authby == AUTHBY_PSK) { |
3400 | /* secret must match the one we already used */ |
3401 | const chunk_t *dpsk = get_connection_psk(d); |
3402 | |
3403 | /* |
3404 | * We can change PSK mid-way in IKEv2 or aggressive mode. |
3405 | * If we initiated, the key we used and the key |
3406 | * we would have used with d must match. |
3407 | */ |
3408 | if (!((st->st_ike_version == IKEv2) || (auth_policy & POLICY_AGGRESSIVE))) {
3409 | if (dpsk == NULL)
3410 | continue; /* no secret */ |
3411 | } |
3412 | } |
3413 | |
3414 | if (this_authby == AUTHBY_RSASIG) { |
3415 | /* |
3416 | * We must at least be able to find our private key. |
3417 | * If we initiated, it must match the one we used in |
3418 | * the IKEv1 SIG_I payload or IKEv2 AUTH payload that |
3419 | * we sent previously. |
3420 | */ |
3421 | const struct pubkey_type *type = &pubkey_type_rsa; |
3422 | const struct private_key_stuff *pks = get_connection_private_key(d, type, st->st_logger); |
3423 | if (pks == NULL((void*)0)) |
3424 | continue; /* no key */ |
3425 | } |
3426 | |
3427 | /* |
3428 | * Paul: We need to check all the other relevant |
3429 | * policy bits, like compression, pfs, etc |
3430 | */ |
3431 | |
3432 | /* |
3433 | * d has passed all the tests. |
3434 | * We'll go with it if the Peer ID was an exact match. |
3435 | */ |
3436 | if (matching_peer_id && wildcards == 0 && |
3437 | peer_pathlen == 0 && our_pathlen == 0) { |
3438 | *fromcert = d_fromcert; |
3439 | connection_buf dcb; |
3440 | dbg(" returning "PRI_CONNECTION" because exact peer id match",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" returning ""\"%s\"%s"" because exact peer id match" , (d)->name, str_connection_instance(d, &dcb)); } } |
3441 | pri_connection(d, &dcb)){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" returning ""\"%s\"%s"" because exact peer id match" , (d)->name, str_connection_instance(d, &dcb)); } }; |
3442 | return d; |
3443 | } |
3444 | |
3445 | /* |
3446 | * If it was a non-exact (wildcard) match, we'll |
3447 | * remember it as best_found in case an exact match |
3448 | * doesn't come along. |
3449 | * ??? the logic involving *_pathlen looks wrong. |
3450 | * ??? which matters more peer_pathlen or our_pathlen minimization? |
3451 | */ |
3452 | if (best_found == NULL((void*)0) || wildcards < best_wildcards || |
3453 | ((wildcards == best_wildcards && |
3454 | peer_pathlen < best_peer_pathlen) || |
3455 | (peer_pathlen == best_peer_pathlen && |
3456 | our_pathlen < best_our_pathlen))) { |
3457 | connection_buf cib; |
3458 | dbg(" refine_host_connection: picking new best "PRI_CONNECTION" (wild=%d, peer_pathlen=%d/our=%d)",{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: picking new best " "\"%s\"%s"" (wild=%d, peer_pathlen=%d/our=%d)", (d)->name, str_connection_instance(d, &cib), wildcards, peer_pathlen , our_pathlen); } } |
3459 | pri_connection(d, &cib),{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: picking new best " "\"%s\"%s"" (wild=%d, peer_pathlen=%d/our=%d)", (d)->name, str_connection_instance(d, &cib), wildcards, peer_pathlen , our_pathlen); } } |
3460 | wildcards, peer_pathlen,{ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: picking new best " "\"%s\"%s"" (wild=%d, peer_pathlen=%d/our=%d)", (d)->name, str_connection_instance(d, &cib), wildcards, peer_pathlen , our_pathlen); } } |
3461 | our_pathlen){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" refine_host_connection: picking new best " "\"%s\"%s"" (wild=%d, peer_pathlen=%d/our=%d)", (d)->name, str_connection_instance(d, &cib), wildcards, peer_pathlen , our_pathlen); } }; |
3462 | *fromcert = d_fromcert; |
3463 | best_found = d; |
3464 | best_wildcards = wildcards; |
3465 | best_peer_pathlen = peer_pathlen; |
3466 | best_our_pathlen = our_pathlen; |
3467 | } |
3468 | } |
3469 | } |
3470 | return best_found; |
3471 | } |
3472 | |
3473 | /* |
3474 | * With virtual addressing, we must not allow someone to use an already |
3475 | * used (by another id) addr/net. |
3476 | */ |
3477 | static bool is_virtual_net_used(struct connection *c,
3478 | const ip_selector *peer_net, |
3479 | const struct id *peer_id) |
3480 | { |
3481 | struct connection_query cq = { .where = HERE, .c = NULL, };
3482 | while (new2old_connection(&cq)) { |
3483 | struct connection *d = cq.c; |
3484 | switch (d->kind) { |
3485 | case CK_PERMANENT: |
3486 | case CK_TEMPLATE: |
3487 | case CK_INSTANCE: |
3488 | if ((selector_range_in_selector_range(*peer_net, d->spd.that.client) || |
3489 | selector_range_in_selector_range(d->spd.that.client, *peer_net)) && |
3490 | !same_id(&d->spd.that.id, peer_id)) |
3491 | { |
3492 | id_buf idb; |
3493 | connection_buf cbuf; |
3494 | subnet_buf client; |
3495 | llog(RC_LOG, c->logger, |
3496 | "Virtual IP %s overlaps with connection "PRI_CONNECTION"\"%s\"%s"" (kind=%s) '%s'", |
3497 | str_selector_subnet(peer_net, &client), |
3498 | pri_connection(d, &cbuf)(d)->name, str_connection_instance(d, &cbuf), |
3499 | enum_name(&connection_kind_names, d->kind), |
3500 | str_id(&d->spd.that.id, &idb)str_id_bytes(&d->spd.that.id, jam_raw_bytes, &idb)); |
3501 | |
3502 | if (!kernel_ops->overlap_supported) { |
3503 | llog(RC_LOG, c->logger, |
3504 | "Kernel method '%s' does not support overlapping IP ranges", |
3505 | kernel_ops->kern_name); |
3506 | return true1; |
3507 | } |
3508 | |
3509 | if (LIN(POLICY_OVERLAPIP, c->policy & d->policy)(((((lset_t)1 << (POLICY_OVERLAPIP_IX))) & (c->policy & d->policy)) == (((lset_t)1 << (POLICY_OVERLAPIP_IX ))))) { |
3510 | llog(RC_LOG, c->logger, |
3511 | "overlap is okay by mutual consent"); |
3512 | |
3513 | /* |
3514 | * Look for another overlap to report |
3515 | * on. |
3516 | */ |
3517 | break; |
3518 | } |
3519 | |
3520 | /* We're not allowed to overlap. Carefully report. */ |
3521 | |
3522 | const struct connection *x = |
3523 | LIN(POLICY_OVERLAPIP, c->policy) ? d :
3524 | LIN(POLICY_OVERLAPIP, d->policy) ? c :
3525 | NULL;
3526 | 
3527 | if (x == NULL) {
3528 | llog(RC_LOG, c->logger,
3529 | "overlap is forbidden (neither agrees to overlap)");
3530 | } else {
3531 | llog(RC_LOG, c->logger,
3532 | "overlap is forbidden ("PRI_CONNECTION" does not agree to overlap)",
3533 | pri_connection(x, &cbuf));
3534 | }
3535 | 
3536 | /* ??? why is this a separate log line? */
3537 | llog(RC_LOG, c->logger,
3538 | "Your ID is '%s'", str_id(peer_id, &idb));
3539 | |
3540 | return true; /* already used by another one */
3541 | } |
3542 | break; |
3543 | |
3544 | case CK_GOING_AWAY: |
3545 | default: |
3546 | break; |
3547 | } |
3548 | } |
3549 | return false; /* you can safely use it */
3550 | } |
3551 | |
3552 | /* |
3553 | * find_client_connection: given a connection suitable for ISAKMP |
3554 | * (i.e. the hosts match), find one suitable for IPsec
3555 | * (i.e. with matching clients). |
3556 | * |
3557 | * If we don't find an exact match (not even our current connection), |
3558 | * we try for one that still needs instantiation. Try Road Warrior |
3559 | * abstract connections and the Opportunistic abstract connections. |
3560 | * This requires inverse instantiation: abstraction. |
3561 | * |
3562 | * After failing to find an exact match, we abstract the peer |
3563 | * to be NO_IP (the wildcard value). This enables matches with |
3564 | * Road Warrior and Opportunistic abstract connections. |
3565 | * |
3566 | * After failing that search, we also abstract the Phase 1 peer ID |
3567 | * if possible. If the peer's ID was the peer's IP address, we make |
3568 | * it NO_ID; instantiation will make it the peer's IP address again. |
3569 | * |
3570 | * If searching for a Road Warrior abstract connection fails, |
3571 | * and conditions are suitable, we search for the best Opportunistic |
3572 | * abstract connection. |
3573 | * |
3574 | * Note: in the end, both Phase 1 IDs must be preserved, after any |
3575 | * instantiation. They are the IDs that have been authenticated. |
3576 | */ |
3577 | |
3578 | #define PATH_WEIGHT 1
3579 | #define WILD_WEIGHT (MAX_CA_PATH_LEN + 1)
3580 | #define PRIO_WEIGHT ((MAX_WILDCARDS + 1) * WILD_WEIGHT)
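/*
 * Editor's illustrative sketch (not part of the original source): the
 * weights above pack a lexicographic preference into one policy_prio_t,
 * mirroring the prio computation in fc_try()/fc_try_oppo() below: being
 * routed carries by far the largest weight, then fewer ID wildcards, then
 * a shorter CA path, then staying on the current connection; the trailing
 * +1 keeps any feasible match above BOTTOM_PRIO.  The helper name is
 * invented for illustration.
 */
static policy_prio_t sketch_fc_prio(bool is_routed, int wildcards,
				    int pathlen, bool same_conn)
{
	return PRIO_WEIGHT * (is_routed ? 1 : 0) +
	       WILD_WEIGHT * (MAX_WILDCARDS - wildcards) +
	       PATH_WEIGHT * (MAX_CA_PATH_LEN - pathlen) +
	       (same_conn ? 1 : 0) +
	       1;
}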
3581 | |
3582 | /* fc_try: a helper function for find_client_connection */ |
3583 | static struct connection *fc_try(const struct connection *c, |
3584 | const struct host_pair *hp, |
3585 | const ip_selector *local_client, |
3586 | const ip_selector *remote_client) |
3587 | { |
3588 | struct connection *best = NULL;
3589 | policy_prio_t best_prio = BOTTOM_PRIO;
3590 | const bool remote_is_host = selector_eq_address(*remote_client,
3591 | c->spd.that.host_addr);
3592 | 
3593 | err_t virtualwhy = NULL;
3594 | for (struct connection *d = hp->connections; d != NULL; d = d->hp_next) {
3595 | if (d->policy & POLICY_GROUP)
3596 | continue; |
3597 | |
3598 | int wildcards, pathlen; |
3599 | |
3600 | if (!(c->connalias != NULL && d->connalias != NULL && streq(c->connalias, d->connalias))) {
3601 | if (!(same_id(&c->spd.this.id, &d->spd.this.id) && |
3602 | match_id(&c->spd.that.id, &d->spd.that.id, &wildcards) && |
3603 | trusted_ca_nss(c->spd.that.ca, d->spd.that.ca, &pathlen))) |
3604 | { |
3605 | continue; |
3606 | } |
3607 | } |
3608 | |
3609 | /* compare protocol and ports */ |
3610 | unsigned local_protocol = selector_protocol(*local_client)->ipproto; |
3611 | unsigned remote_protocol = selector_protocol(*remote_client)->ipproto; |
3612 | unsigned local_port = hport(selector_port(*local_client)); |
3613 | unsigned remote_port = hport(selector_port(*remote_client)); |
3614 | if (!(d->spd.this.protocol == local_protocol && |
3615 | d->spd.that.protocol == remote_protocol && |
3616 | (d->spd.this.port == 0 || d->spd.this.port == local_port) && |
3617 | (d->spd.that.has_port_wildcard || d->spd.that.port == remote_port))) |
3618 | { |
3619 | continue; |
3620 | } |
3621 | |
3622 | /* |
3623 | * non-Opportunistic case: |
3624 | * local_client must match. |
3625 | * |
3626 | * So must remote_client, but the testing is complicated |
3627 | * by the fact that the peer might be a wildcard |
3628 | * and if so, the default value of that.client |
3629 | * won't match the default remote_net. The appropriate test: |
3630 | * |
3631 | * If d has a peer client, it must match remote_net. |
3632 | * If d has no peer client, remote_net must contain just the peer itself.
3633 | */ |
3634 | |
3635 | for (const struct spd_route *sr = &d->spd; |
3636 | best != d && sr != NULL; sr = sr->spd_next) {
3637 | 
3638 | if (DBGP(DBG_BASE)) {
3639 | selector_buf s1, d1; |
3640 | selector_buf s3, d3; |
3641 | DBG_log(" fc_try trying %s:%s:%d/%d -> %s:%d/%d%s vs %s:%s:%d/%d -> %s:%d/%d%s", |
3642 | c->name, str_selector(local_client, &s1), |
3643 | c->spd.this.protocol, c->spd.this.port, |
3644 | str_selector(remote_client, &d1), |
3645 | c->spd.that.protocol, c->spd.that.port, |
3646 | is_virtual_connection(c) ? |
3647 | "(virt)" : "", d->name, |
3648 | str_selector(&sr->this.client, &s3), |
3649 | sr->this.protocol, sr->this.port, |
3650 | str_selector(&sr->that.client, &d3), |
3651 | sr->that.protocol, sr->that.port, |
3652 | is_virtual_sr(sr) ? "(virt)" : ""); |
3653 | } |
3654 | |
3655 | if (!selector_range_eq_selector_range(sr->this.client, *local_client)) { |
3656 | if (DBGP(DBG_BASE)) {
3657 | selector_buf s1, s3; |
3658 | DBG_log(" our client (%s) not in local_net (%s)", |
3659 | str_selector(&sr->this.client, &s3), |
3660 | str_selector(local_client, &s1)); |
3661 | } |
3662 | continue; |
3663 | } |
3664 | |
3665 | if (sr->that.has_client) { |
3666 | |
3667 | if (!selector_range_eq_selector_range(sr->that.client, *remote_client) && |
3668 | !is_virtual_sr(sr)) { |
3669 | if (DBGP(DBG_BASE)) {
3670 | selector_buf d1, d3; |
3671 | DBG_log(" their client (%s) not in same remote_net (%s)", |
3672 | str_selector(&sr->that.client, &d3), |
3673 | str_selector(remote_client, &d1)); |
3674 | } |
3675 | continue; |
3676 | } |
3677 | |
3678 | virtualwhy = check_virtual_net_allowed(d, |
3679 | selector_subnet(*remote_client), |
3680 | sr->that.host_addr); |
3681 | |
3682 | if (is_virtual_sr(sr) && |
3683 | (virtualwhy != NULL ||
3684 | is_virtual_net_used(d, remote_client,
3685 | &sr->that.id))) {
3686 | dbg(" virtual net not allowed");
3687 | continue; |
3688 | } |
3689 | } else if (!remote_is_host) { |
3690 | continue; |
3691 | } |
3692 | |
3693 | /* |
3694 | * We've run the gauntlet -- success: |
3695 | * We've got an exact match of subnets. |
3696 | * The connection is feasible, but we continue looking |
3697 | * for the best. |
3698 | * The highest priority wins, implementing eroute-like |
3699 | * rule. |
3700 | * - a routed connection is preferred
3701 | * - given that, the smallest number of ID wildcards |
3702 | * are preferred |
3703 | * - given that, the shortest CA pathlength is preferred |
3704 | * - given that, not switching is preferred |
3705 | */ |
3706 | policy_prio_t prio = |
3707 | PRIO_WEIGHT * routed(sr->routing) +
3708 | WILD_WEIGHT * (MAX_WILDCARDS - wildcards) +
3709 | PATH_WEIGHT * (MAX_CA_PATH_LEN - pathlen) +
3710 | (c == d ? 1 : 0) + |
3711 | 1; |
3712 | if (prio > best_prio) { |
3713 | best = d; |
3714 | best_prio = prio; |
3715 | } |
3716 | } |
3717 | } |
3718 | |
3719 | if (best != NULL && NEVER_NEGOTIATE(best->policy))
3720 | best = NULL;
3721 | 
3722 | dbg(" fc_try concluding with %s [%" PRIu32 "]",
3723 | (best ? best->name : "none"), best_prio);
3724 | 
3725 | if (best == NULL && virtualwhy != NULL) {
3726 | llog(RC_LOG, c->logger, |
3727 | "peer proposal was rejected in a virtual connection policy: %s", |
3728 | virtualwhy); |
3729 | } |
3730 | |
3731 | return best; |
3732 | } |
3733 | |
3734 | static struct connection *fc_try_oppo(const struct connection *c, |
3735 | const struct host_pair *hp, |
3736 | const ip_selector *local_client, |
3737 | const ip_selector *remote_client) |
3738 | { |
3739 | struct connection *best = NULL;
3740 | policy_prio_t best_prio = BOTTOM_PRIO;
3741 | 
3742 | for (struct connection *d = hp->connections; d != NULL; d = d->hp_next) {
3743 | if (d->policy & POLICY_GROUP)
3744 | continue; |
3745 | |
3746 | int wildcards, pathlen; |
3747 | if (!(same_id(&c->spd.this.id, &d->spd.this.id) && |
3748 | match_id(&c->spd.that.id, &d->spd.that.id, &wildcards) && |
3749 | trusted_ca_nss(c->spd.that.ca, d->spd.that.ca, &pathlen))) |
3750 | { |
3751 | continue; |
3752 | } |
3753 | |
3754 | /* compare protocol and ports */ |
3755 | unsigned local_protocol = selector_protocol(*local_client)->ipproto; |
3756 | unsigned remote_protocol = selector_protocol(*remote_client)->ipproto; |
3757 | unsigned local_port = hport(selector_port(*local_client)); |
3758 | unsigned remote_port = hport(selector_port(*remote_client)); |
3759 | if (d->spd.this.protocol != local_protocol || |
3760 | (d->spd.this.port && d->spd.this.port != local_port) || |
3761 | d->spd.that.protocol != remote_protocol || |
3762 | (d->spd.that.port != remote_port && |
3763 | !d->spd.that.has_port_wildcard)) |
3764 | continue; |
3765 | |
3766 | /* |
3767 | * Opportunistic case: |
3768 | * local_net must be inside d->spd.this.client |
3769 | * and remote_net must be inside d->spd.that.client |
3770 | * Note: this host_pair chain also has shunt |
3771 | * eroute conns (clear, drop), but they won't |
3772 | * be marked as opportunistic. |
3773 | */ |
3774 | |
3775 | for (const struct spd_route *sr = &d->spd; |
3776 | sr != NULL((void*)0); sr = sr->spd_next) { |
3777 | |
3778 | if (DBGP(DBG_BASE)) {
3779 | selector_buf s1; |
3780 | selector_buf d1; |
3781 | selector_buf s3; |
3782 | selector_buf d3; |
3783 | DBG_log(" fc_try_oppo trying %s:%s -> %s vs %s:%s -> %s", |
3784 | c->name, str_selector(local_client, &s1), |
3785 | str_selector(remote_client, &d1), |
3786 | d->name, str_selector(&sr->this.client, &s3), |
3787 | str_selector(&sr->that.client, &d3)); |
3788 | } |
3789 | |
3790 | if (!selector_range_in_selector_range(*local_client, sr->this.client) || |
3791 | !selector_range_in_selector_range(*remote_client, sr->that.client)) |
3792 | continue; |
3793 | |
3794 | /* |
3795 | * The connection is feasible, but we continue looking |
3796 | * for the best. |
3797 | * The highest priority wins, implementing eroute-like |
3798 | * rule. |
3799 | * - our smallest client subnet is preferred (longest |
3800 | * mask) |
3801 | * - given that, the peer's smallest client subnet is preferred
3802 | * - given that, a routed connection is preferred
3803 | * - given that, the smallest number of ID wildcards |
3804 | * are preferred |
3805 | * - given that, the shortest CA pathlength is preferred |
3806 | */ |
3807 | policy_prio_t prio = |
3808 | PRIO_WEIGHT * (d->policy_prio + routed(sr->routing)) +
3809 | WILD_WEIGHT * (MAX_WILDCARDS - wildcards) +
3810 | PATH_WEIGHT * (MAX_CA_PATH_LEN - pathlen);
3811 | |
3812 | if (prio > best_prio) { |
3813 | best = d; |
3814 | best_prio = prio; |
3815 | } |
3816 | } |
3817 | } |
3818 | |
3819 | /* if the best wasn't opportunistic, we fail: it must be a shunt */ |
3820 | if (best != NULL &&
3821 | (NEVER_NEGOTIATE(best->policy) ||
3822 | (best->policy & POLICY_OPPORTUNISTIC) == LEMPTY))
3823 | best = NULL;
3824 | 
3825 | dbg(" fc_try_oppo concluding with %s [%" PRIu32 "]",
3826 | (best ? best->name : "none"), best_prio);
3827 | return best; |
3828 | } |
3829 | |
3830 | struct connection *find_v1_client_connection(struct connection *const c, |
3831 | const ip_selector *local_client, |
3832 | const ip_selector *remote_client) |
3833 | { |
3834 | struct connection *d; |
3835 | |
3836 | /* weird things can happen to our interfaces */ |
3837 | if (!oriented(c)) { |
3838 | return NULL((void*)0); |
3839 | } |
3840 | |
3841 | if (DBGP(DBG_BASE)) {
3842 | selectors_buf sb; |
3843 | DBG_log("find_v1_client_connection starting with %s", c->name); |
3844 | DBG_log(" looking for %s", |
3845 | str_selectors(local_client, remote_client, &sb)); |
3846 | } |
3847 | |
3848 | /* |
3849 | * Give priority to current connection |
3850 | * but even greater priority to a routed concrete connection. |
3851 | */ |
3852 | { |
3853 | struct connection *unrouted = NULL((void*)0); |
3854 | int srnum = -1; |
3855 | |
3856 | for (const struct spd_route *sr = &c->spd; unrouted == NULL && sr != NULL;
3857 | sr = sr->spd_next) {
3858 | srnum++;
3859 | 
3860 | if (DBGP(DBG_BASE)) {
3861 | selector_buf s2; |
3862 | selector_buf d2; |
3863 | DBG_log(" concrete checking against sr#%d %s -> %s", srnum, |
3864 | str_selector(&sr->this.client, &s2), |
3865 | str_selector(&sr->that.client, &d2)); |
3866 | } |
3867 | |
3868 | unsigned local_protocol = selector_protocol(*local_client)->ipproto; |
3869 | unsigned local_port = selector_port(*local_client).hport; |
3870 | unsigned remote_protocol = selector_protocol(*remote_client)->ipproto; |
3871 | unsigned remote_port = selector_port(*remote_client).hport; |
3872 | if (selector_range_eq_selector_range(sr->this.client, *local_client) && |
3873 | selector_range_eq_selector_range(sr->that.client, *remote_client) && |
3874 | sr->this.protocol == local_protocol && |
3875 | (!sr->this.port || sr->this.port == local_port) && |
3876 | (sr->that.protocol == remote_protocol) && |
3877 | (!sr->that.port || sr->that.port == remote_port)) { |
3878 | if (routed(sr->routing))
3879 | return c; |
3880 | |
3881 | unrouted = c; |
3882 | } |
3883 | } |
3884 | |
3885 | /* exact match? */ |
3886 | /* |
3887 | * clang 3.4 says: warning: Access to field 'host_pair' results in a dereference of a null pointer (loaded from variable 'c') |
3888 | * If so, the caller must have passed NULL for it |
3889 | * and earlier references would be wrong (segfault). |
3890 | */ |
3891 | d = fc_try(c, c->host_pair, local_client, remote_client); |
3892 | |
3893 | dbg(" fc_try %s gives %s", c->name, (d ? d->name : "none")){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" fc_try %s gives %s", c->name, (d ? d-> name : "none")); } }; |
3894 | |
3895 | if (d == NULL((void*)0)) |
3896 | d = unrouted; |
3897 | } |
3898 | |
3899 | if (d == NULL((void*)0)) { |
3900 | /* |
3901 | * look for an abstract connection to match |
3902 | */ |
3903 | const struct host_pair *hp = NULL((void*)0); |
3904 | for (const struct spd_route *sra = &c->spd; |
3905 | sra != NULL((void*)0) && hp == NULL((void*)0); sra = sra->spd_next) { |
3906 | hp = find_host_pair(sra->this.host_addr, unset_address); |
3907 | if (DBGP(DBG_BASE)(cur_debugging & (((lset_t)1 << (DBG_BASE_IX))))) { |
3908 | selector_buf s2; |
3909 | selector_buf d2; |
3910 | DBG_log(" checking hostpair %s -> %s is %s", |
3911 | str_selector(&sra->this.client, &s2), |
3912 | str_selector(&sra->that.client, &d2), |
3913 | (hp ? "found" : "not found")); |
3914 | } |
3915 | } |
3916 | |
3917 | if (hp != NULL((void*)0)) { |
3918 | /* RW match with actual remote_id or abstract remote_id? */ |
3919 | d = fc_try(c, hp, local_client, remote_client); |
3920 | |
3921 | if (d == NULL((void*)0) && |
3922 | selector_contains_one_address(*local_client) && |
3923 | selector_contains_one_address(*remote_client)) { |
3924 | /* |
3925 | * Opportunistic match? |
3926 | * Always use abstract remote_id. |
3927 | * Note that later instantiation will result |
3928 | * in the same remote_id. |
3929 | */ |
3930 | d = fc_try_oppo(c, hp, local_client, remote_client); |
3931 | } |
3932 | } |
3933 | } |
3934 | |
3935 | dbg(" concluding with d = %s", (d ? d->name : "none")){ if ((cur_debugging & (((lset_t)1 << (DBG_BASE_IX) )))) { DBG_log(" concluding with d = %s", (d ? d->name : "none" )); } }; |
3936 | return d; |
3937 | } |
3938 | |
3939 | /* signed result suitable for quicksort */ |
3940 | int connection_compare(const struct connection *ca, |
3941 | const struct connection *cb) |
3942 | { |
3943 | int ret; |
3944 | |
3945 | ret = strcmp(ca->name, cb->name); |
3946 | if (ret != 0) |
3947 | return ret; |
3948 | |
3949 | /* note: enum connection_kind behaves like int */ |
3950 | ret = ca->kind - cb->kind; |
3951 | if (ret != 0) |
3952 | return ret; |
3953 | |
3954 | /* same name, and same type */ |
3955 | switch (ca->kind) { |
3956 | case CK_INSTANCE: |
3957 | return ca->instance_serial < cb->instance_serial ? -1 : |
3958 | ca->instance_serial > cb->instance_serial ? 1 : 0;
3959 | |
3960 | default: |
3961 | return (ca->policy_prio < cb->policy_prio ? -1 : |
3962 | ca->policy_prio > cb->policy_prio ? 1 : 0); |
3963 | } |
3964 | } |
3965 | |
3966 | static int connection_compare_qsort(const void *a, const void *b) |
3967 | { |
3968 | return connection_compare(*(const struct connection *const *)a, |
3969 | *(const struct connection *const *)b); |
3970 | } |
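/*
 * Editor's illustrative sketch (not part of the original source): the
 * qsort adapter above is intended for sorting an array of connection
 * pointers; the wrapper below is invented purely for illustration.
 */
static void sketch_sort_connections(struct connection **array, size_t count)
{
	/* comparator dereferences each element as struct connection *const * */
	qsort(array, count, sizeof(struct connection *), connection_compare_qsort);
}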
3971 | |
3972 | static void show_one_sr(struct show *s, |
3973 | const struct connection *c, |
3974 | const struct spd_route *sr, |
3975 | const char *instance) |
3976 | { |
3977 | char topo[CONN_BUF_LEN];
3978 | ipstr_buf thisipb, thatipb; |
3979 | |
3980 | show_comment(s, "\"%s\"%s: %s; %s; eroute owner: #%lu", |
3981 | c->name, instance, |
3982 | format_connection(topo, sizeof(topo), c, sr), |
3983 | enum_name(&routing_story, sr->routing), |
3984 | sr->eroute_owner); |
3985 | |
3986 | #define OPT_HOST(h, ipb) (address_is_specified(h) ? str_address(&h, &ipb) : "unset") |
3987 | |
3988 | /* note: this macro generates a pair of arguments */ |
3989 | #define OPT_PREFIX_STR(pre, s) (s) == NULL ? "" : (pre), (s) == NULL ? "" : (s)
3990 | |
3991 | show_comment(s, |
3992 | "\"%s\"%s: %s; my_ip=%s; their_ip=%s%s%s%s%s; my_updown=%s;", |
3993 | c->name, instance, |
3994 | oriented(c) ? "oriented" : "unoriented", |
3995 | OPT_HOST(c->spd.this.host_srcip, thisipb), |
3996 | OPT_HOST(c->spd.that.host_srcip, thatipb), |
3997 | OPT_PREFIX_STR("; mycert=", cert_nickname(&sr->this.cert)), |
3998 | OPT_PREFIX_STR("; peercert=", cert_nickname(&sr->that.cert)), |
3999 | (sr->this.updown == NULL || streq(sr->this.updown, "%disabled")) ?
4000 | "<disabled>" : sr->this.updown |
4001 | ); |
4002 | |
4003 | #undef OPT_HOST |
4004 | #undef OPT_PREFIX_STR |
4005 | |
4006 | /* |
4007 | * Both should not be set, but if they are, we want |
4008 | * to know |
4009 | */ |
4010 | #define COMBO(END, SERVER, CLIENT) \ |
4011 | ((END).SERVER ? \ |
4012 | ((END).CLIENT ? "BOTH??" : "server") : \ |
4013 | ((END).CLIENT ? "client" : "none")) |
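/*
 * Editor's note (illustrative, not in the original source): for example,
 * with sr->this.xauth_server set and sr->this.xauth_client clear,
 * COMBO(sr->this, xauth_server, xauth_client) evaluates to "server";
 * if both flags are set it yields the "BOTH??" marker flagged above.
 */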
4014 | |
4015 | show_comment(s, |
4016 | "\"%s\"%s: xauth us:%s, xauth them:%s, %s my_username=%s; their_username=%s", |
4017 | c->name, instance, |
4018 | /* |
4019 | * Both should not be set, but if they are, we want to |
4020 | * know |
4021 | */ |
4022 | COMBO(sr->this, xauth_server, xauth_client), |
4023 | COMBO(sr->that, xauth_server, xauth_client), |
4024 | /* should really be an enum name */ |
4025 | sr->this.xauth_server ? |
4026 | c->xauthby == XAUTHBY_FILE ? |
4027 | "xauthby:file;" : |
4028 | c->xauthby == XAUTHBY_PAM ? |
4029 | "xauthby:pam;" : |
4030 | "xauthby:alwaysok;" : |
4031 | "", |
4032 | sr->this.xauth_username != NULL((void*)0) ? sr->this.xauth_username : "[any]", |
4033 | sr->that.xauth_username != NULL((void*)0) ? sr->that.xauth_username : "[any]"); |
4034 | |
4035 | esb_buf auth1, auth2; |
4036 | |
4037 | show_comment(s, |
4038 | "\"%s\"%s: our auth:%s, their auth:%s", |
4039 | c->name, instance, |
4040 | enum_show_short(&keyword_authby_names, sr->this.authby, &auth1), |
4041 | enum_show_short(&keyword_authby_names, sr->that.authby, &auth2)); |
4042 | |
4043 | show_comment(s, |
4044 | "\"%s\"%s: modecfg info: us:%s, them:%s, modecfg policy:%s, dns:%s, domains:%s, cat:%s;", |
4045 | c->name, instance, |
4046 | COMBO(sr->this, modecfg_server, modecfg_client), |
4047 | COMBO(sr->that, modecfg_server, modecfg_client), |
4048 | |
4049 | (c->policy & POLICY_MODECFG_PULL) ? "pull" : "push",
4050 | (c->modecfg_dns == NULL) ? "unset" : c->modecfg_dns,
4051 | (c->modecfg_domains == NULL) ? "unset" : c->modecfg_domains,
4052 | sr->this.cat ? "set" : "unset"); |
4053 | |
4054 | #undef COMBO |
4055 | |
4056 | if (c->modecfg_banner != NULL((void*)0)) { |
4057 | show_comment(s, "\"%s\"%s: banner:%s;", |
4058 | c->name, instance, c->modecfg_banner); |
4059 | } |
4060 | |
4061 | /* |
4062 | * Show the first valid sec_label. |
4063 | * |
4064 | * We only support symmetric labels, but store it in struct |
4065 | * end - pick one. |
4066 | * |
4067 | * XXX: IKEv1 stores the negotiated sec_label in the state. |
4068 | */ |
4069 | if (sr->this.sec_label.len > 0) { |
4070 | /* negotiated (IKEv2) */ |
4071 | show_comment(s, "\"%s\"%s: sec_label:"PRI_SHUNK,
4072 | c->name, instance, pri_shunk(sr->this.sec_label));
4073 | } else if (c->config->sec_label.len > 0) {
4074 | /* configured */
4075 | show_comment(s, "\"%s\"%s: sec_label:"PRI_SHUNK,
4076 | c->name, instance, pri_shunk(c->config->sec_label));
4077 | } else { |
4078 | show_comment(s, "\"%s\"%s: sec_label:unset;", c->name, instance); |
4079 | } |
4080 | } |
4081 | |
4082 | void show_one_connection(struct show *s, |
4083 | const struct connection *c) |
4084 | { |
4085 | const char *ifn; |
4086 | char ifnstr[2 * IFNAMSIZ + 2]; /* id_rname@id_vname\0 */
4087 | char instance[32]; |
4088 | char mtustr[8]; |
4089 | char sapriostr[13]; |
4090 | char satfcstr[13]; |
4091 | char nflogstr[8]; |
4092 | char markstr[2 * (2 * strlen("0xffffffff") + strlen("/")) + strlen(", ") ]; |
4093 | |
4094 | if (oriented(c)) { |
4095 | if (c->xfrmi != NULL((void*)0) && c->xfrmi->name != NULL((void*)0)) { |
4096 | char *n = jam_str(ifnstr, sizeof(ifnstr), |
4097 | c->xfrmi->name); |
4098 | add_str(ifnstr, sizeof(ifnstr), n, "@"); |
4099 | add_str(ifnstr, sizeof(ifnstr), n, |
4100 | c->interface->ip_dev->id_rname); |
4101 | ifn = ifnstr; |
4102 | } else { |
4103 | ifn = c->interface->ip_dev->id_rname; |
4104 | } |
4105 | } else { |
4106 | ifn = ""; |
4107 | }; |
4108 | |
4109 | instance[0] = '\0'; |
4110 | if (c->kind == CK_INSTANCE && c->instance_serial != 0) |
4111 | snprintf(instance, sizeof(instance), "[%lu]", |
4112 | c->instance_serial); |
4113 | |
4114 | /* Show topology. */ |
4115 | { |
4116 | const struct spd_route *sr = &c->spd; |
4117 | |
4118 | while (sr != NULL((void*)0)) { |
4119 | show_one_sr(s, c, sr, instance); |
4120 | sr = sr->spd_next; |
4121 | } |
4122 | } |
4123 | |
4124 | /* Show CAs */ |
4125 | if (c->spd.this.ca.ptr != NULL((void*)0) || c->spd.that.ca.ptr != NULL((void*)0)) { |
4126 | dn_buf this_ca, that_ca; |
4127 | show_comment(s, |
4128 | "\"%s\"%s: CAs: '%s'...'%s'", |
4129 | c->name, |
4130 | instance, |
4131 | str_dn_or_null(c->spd.this.ca, "%any", &this_ca), |
4132 | str_dn_or_null(c->spd.that.ca, "%any", &that_ca)); |
4133 | } |
4134 | |
4135 | show_comment(s, |
4136 | "\"%s\"%s: ike_life: %jds; ipsec_life: %jds; replay_window: %u; rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu;", |
4137 | c->name, |
4138 | instance, |
4139 | deltasecs(c->sa_ike_life_seconds), |
4140 | deltasecs(c->sa_ipsec_life_seconds), |
4141 | c->sa_replay_window, |
4142 | deltasecs(c->sa_rekey_margin), |
4143 | c->sa_rekey_fuzz, |
4144 | c->sa_keying_tries); |
4145 | |
4146 | show_comment(s, |
4147 | "\"%s\"%s: retransmit-interval: %jdms; retransmit-timeout: %jds; iketcp:%s; iketcp-port:%d;", |
4148 | c->name, |
4149 | instance, |
4150 | deltamillisecs(c->config->retransmit_interval), |
4151 | deltasecs(c->config->retransmit_timeout), |
4152 | c->iketcp == IKE_TCP_NO ? "no" : c->iketcp == IKE_TCP_ONLY ? "yes" : |
4153 | c->iketcp == IKE_TCP_FALLBACK ? "fallback" : "<BAD VALUE>", |
4154 | c->remote_tcpport); |
4155 | |
4156 | show_comment(s, |
4157 | "\"%s\"%s: initial-contact:%s; cisco-unity:%s; fake-strongswan:%s; send-vendorid:%s; send-no-esp-tfc:%s;", |
4158 | c->name, instance, |
4159 | bool_str(c->initial_contact), |
4160 | bool_str(c->cisco_unity), |
4161 | bool_str(c->fake_strongswan), |
4162 | bool_str(c->send_vendorid), |
4163 | bool_str(c->send_no_esp_tfc)); |
4164 | |
4165 | if (c->policy_next != NULL((void*)0)) { |
4166 | show_comment(s, |
4167 | "\"%s\"%s: policy_next: %s", |
4168 | c->name, instance, c->policy_next->name); |
4169 | } |
4170 | |
4171 | lset_t policy = c->policy; |
4172 | policy_buf pb; |
4173 | show_comment(s, "\"%s\"%s: policy: %s%s%s%s%s%s%s;", |
4174 | c->name, instance, |
4175 | c->ike_version > 0 ? enum_name(&ike_version_names, c->ike_version) : "", |
4176 | c->ike_version > 0 && policy != LEMPTY ? "+" : "",
4177 | str_policy(policy, &pb),
4178 | NEVER_NEGOTIATE(policy) ? "+NEVER_NEGOTIATE" : "",
4179 | (c->spd.this.key_from_DNS_on_demand || |
4180 | c->spd.that.key_from_DNS_on_demand) ? "; " : "", |
4181 | c->spd.this.key_from_DNS_on_demand ? "+lKOD" : "", |
4182 | c->spd.that.key_from_DNS_on_demand ? "+rKOD" : ""); |
4183 | |
4184 | if (c->ike_version == IKEv2) { |
4185 | lset_buf hashpolbuf; |
4186 | const char *hashstr = str_lset(&sighash_policy_bit_names, |
4187 | c->sighash_policy, |
4188 | &hashpolbuf); |
4189 | show_comment(s, "\"%s\"%s: v2-auth-hash-policy: %s;", |
4190 | c->name, instance, hashstr); |
4191 | } |
4192 | |
4193 | if (c->connmtu != 0) |
4194 | snprintf(mtustr, sizeof(mtustr), "%d", c->connmtu); |
4195 | else |
4196 | strcpy(mtustr, "unset"); |
4197 | |
4198 | if (c->sa_priority != 0) |
4199 | snprintf(sapriostr, sizeof(sapriostr), "%" PRIu32, c->sa_priority);
4200 | else |
4201 | strcpy(sapriostr, "auto"); |
4202 | |
4203 | if (c->sa_tfcpad != 0) |
4204 | snprintf(satfcstr, sizeof(satfcstr), "%u", c->sa_tfcpad); |
4205 | else |
4206 | strcpy(satfcstr, "none"); |
4207 | |
4208 | policy_prio_buf prio; |
4209 | show_comment(s, |
4210 | "\"%s\"%s: conn_prio: %s; interface: %s; metric: %u; mtu: %s; sa_prio:%s; sa_tfc:%s;", |
4211 | c->name, instance, |
4212 | str_policy_prio(c->policy_prio, &prio), |
4213 | ifn, |
4214 | c->metric, |
4215 | mtustr, sapriostr, satfcstr); |
4216 | |
4217 | if (c->nflog_group != 0) |
4218 | snprintf(nflogstr, sizeof(nflogstr), "%d", c->nflog_group); |
4219 | else |
4220 | strcpy(nflogstr, "unset"); |
4221 | |
4222 | if (c->sa_marks.in.val != 0 || c->sa_marks.out.val != 0) {
4223 | snprintf(markstr, sizeof(markstr), "%" PRIu32 "/%#08" PRIx32 ", %" PRIu32 "/%#08" PRIx32,
4224 | c->sa_marks.in.val, c->sa_marks.in.mask, |
4225 | c->sa_marks.out.val, c->sa_marks.out.mask); |
4226 | } else { |
4227 | strcpy(markstr, "unset"); |
4228 | } |
4229 | |
4230 | show_comment(s, |
4231 | "\"%s\"%s: nflog-group: %s; mark: %s; vti-iface:%s; " |
4232 | "vti-routing:%s; vti-shared:%s;" |
4233 | " nic-offload:%s;" |
4234 | , |
4235 | c->name, instance, nflogstr, markstr, |
4236 | c->vti_iface == NULL ? "unset" : c->vti_iface,
4237 | bool_str(c->vti_routing), |
4238 | bool_str(c->vti_shared), |
4239 | (c->nic_offload == yna_auto) ? "auto" : |
4240 | bool_str(c->nic_offload == yna_yes) |
4241 | ); |
4242 | |
4243 | { |
4244 | id_buf thisidb; |
4245 | id_buf thatidb; |
4246 | |
4247 | show_comment(s, |
4248 | "\"%s\"%s: our idtype: %s; our id=%s; their idtype: %s; their id=%s", |
4249 | c->name, instance, |
4250 | enum_name(&ike_id_type_names, c->spd.this.id.kind), |
4251 | str_id(&c->spd.this.id, &thisidb),
4252 | enum_name(&ike_id_type_names, c->spd.that.id.kind),
4253 | str_id(&c->spd.that.id, &thatidb));
4254 | } |
4255 | |
4256 | /* slightly complicated stuff to avoid extra crap */ |
4257 | show_comment(s, |
4258 | "\"%s\"%s: dpd: %s; delay:%ld; timeout:%ld; nat-t: encaps:%s; nat_keepalive:%s; ikev1_natt:%s", |
4259 | c->name, instance, |
4260 | enum_name(&dpd_action_names, c->dpd_action), |
4261 | (long) deltasecs(c->dpd_delay), |
4262 | (long) deltasecs(c->dpd_timeout), |
4263 | (c->encaps == yna_auto) ? "auto" : |
4264 | bool_str(c->encaps == yna_yes), |
4265 | bool_str(c->nat_keepalive), |
4266 | (c->ikev1_natt == NATT_BOTH) ? "both" : |
4267 | (c->ikev1_natt == NATT_RFC) ? "rfc" : |
4268 | (c->ikev1_natt == NATT_DRAFTS) ? "drafts" : "none" |
4269 | ); |
4270 | |
4271 | if (!lmod_empty(c->extra_debugging)) { |
4272 | SHOW_JAMBUF(RC_COMMENT, s, buf) {
4273 | jam(buf, "\"%s\"%s: debug: ", |
4274 | c->name, instance); |
4275 | jam_lmod(buf, &debug_names, "+", c->extra_debugging); |
4276 | } |
4277 | } |
4278 | |
4279 | SHOW_JAMBUF(RC_COMMENT, s, buf) {
4280 | jam(buf, "\"%s\"%s: newest ISAKMP SA: #%lu; newest IPsec SA: #%lu; conn serial: "PRI_CO"",
4281 | c->name, |
4282 | instance, |
4283 | c->newest_ike_sa, |
4284 | c->newest_ipsec_sa, |
4285 | pri_co(c->serialno));
4286 | if (c->serial_from != UNSET_CO_SERIAL) { |
4287 | jam(buf, ", instantiated from: "PRI_CO";",
4288 | pri_co(c->serial_from));
4289 | } else { |
4290 | jam(buf, ";"); |
4291 | } |
4292 | } |
4293 | |
4294 | if (c->connalias != NULL) {
4295 | show_comment(s, "\"%s\"%s: aliases: %s", |
4296 | c->name, |
4297 | instance, |
4298 | c->connalias); |
4299 | } |
4300 | |
4301 | show_ike_alg_connection(s, c, instance); |
4302 | show_kernel_alg_connection(s, c, instance); |
4303 | } |
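/*
 * Added annotation (not part of connections.c): a minimal, standalone
 * sketch of how one of the show_comment() lines above renders.  Only the
 * format string is taken from the code; the connection name "example-conn",
 * the "[1]" instance suffix and every numeric value are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* mirrors the ike_life/ipsec_life status line emitted above */
	printf("\"%s\"%s: ike_life: %jds; ipsec_life: %jds; replay_window: %u; "
	       "rekey_margin: %jds; rekey_fuzz: %lu%%; keyingtries: %lu;\n",
	       "example-conn", "[1]",
	       (intmax_t)3600, (intmax_t)28800,	/* IKE and IPsec SA lifetimes, seconds */
	       32u,				/* replay window */
	       (intmax_t)540,			/* rekey margin, seconds */
	       100ul, 0ul);			/* rekey fuzz %, keying tries */
	return 0;
}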
4304 | |
4305 | void show_connections_status(struct show *s) |
4306 | { |
4307 | int count = 0; |
4308 | int active = 0; |
4309 | |
4310 | show_separator(s); |
4311 | show_comment(s, "Connection list:"); |
4312 | show_separator(s); |
4313 | |
4314 | struct connection_query cq = { .where = HERE, .c = NULL, };
4315 | while (new2old_connection(&cq)) { |
4316 | struct connection *c = cq.c; |
4317 | count++; |
4318 | if (c->spd.routing == RT_ROUTED_TUNNEL) |
4319 | active++; |
4320 | } |
4321 | |
4322 | if (count != 0) { |
4323 | /* make an array of connections, sort it, and report it */ |
4324 | |
4325 | struct connection **array = |
4326 | alloc_bytes(sizeof(struct connection *) * count, |
4327 | "connection array"); |
4328 | int i = 0; |
4329 | |
4330 | |
4331 | struct connection_query cq = { .where = HERE, .c = NULL, };
4332 | while (new2old_connection(&cq)) { |
4333 | array[i++] = cq.c; |
4334 | } |
4335 | |
4336 | /* sort it! */ |
4337 | qsort(array, count, sizeof(struct connection *), |
4338 | connection_compare_qsort); |
4339 | |
4340 | for (i = 0; i < count; i++) |
4341 | show_one_connection(s, array[i]); |
4342 | |
4343 | pfree(array); |
4344 | show_separator(s); |
4345 | } |
4346 | |
4347 | show_comment(s, "Total IPsec connections: loaded %d, active %d", |
4348 | count, active); |
4349 | } |
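/*
 * Added annotation (not part of connections.c): the function above uses a
 * count-then-collect-then-qsort pattern.  The standalone sketch below shows
 * the same pattern on a toy list of names; cmp_name and the sample names
 * are hypothetical stand-ins for connection_compare_qsort() and the
 * connection database walked by new2old_connection().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_name(const void *a, const void *b)
{
	/* qsort() passes pointers to the array slots, i.e. char ** */
	const char *const *l = a;
	const char *const *r = b;
	return strcmp(*l, *r);
}

int main(void)
{
	const char *db[] = { "west-east", "road/1x1", "block", NULL };

	/* first pass: count the entries */
	int count = 0;
	for (const char **p = db; *p != NULL; p++)
		count++;

	if (count != 0) {
		/* second pass: collect pointers, sort, then report */
		const char **array = malloc(sizeof(const char *) * count);
		if (array == NULL)
			return 1;
		int i = 0;
		for (const char **p = db; *p != NULL; p++)
			array[i++] = *p;
		qsort(array, count, sizeof(const char *), cmp_name);
		for (i = 0; i < count; i++)
			printf("\"%s\"\n", array[i]);
		free(array);
	}
	printf("Total connections: loaded %d\n", count);
	return 0;
}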
4350 | |
4351 | /* |
4352 | * Delete a connection if |
4353 | * - it is an instance and it is no longer in use, and
4354 | * - its IKE state is not shared with another connection.
4355 | * We must be careful to avoid circularity: |
4356 | * we don't touch it if it is CK_GOING_AWAY. |
4357 | */ |
4358 | void connection_delete_unused_instance(struct connection **cp, |
4359 | struct state *old_state, |
4360 | struct fd *whackfd) |
4361 | { |
4362 | struct connection *c = (*cp); |
4363 | *cp = NULL;
4364 | |
4365 | if (c->kind != CK_INSTANCE) { |
4366 | dbg("connection %s is not an instance, skipping delete-unused", c->name);
4367 | return; |
4368 | } |
4369 | |
4370 | if (connection_is_pending(c)) { |
4371 | dbg("connection instance %s is pending, skipping delete-unused",
4372 | c->name);
4373 | return; |
4374 | } |
4375 | |
4376 | if (LIN(POLICY_UP, c->policy) &&
4377 | old_state != NULL && (IS_IKE_SA_ESTABLISHED(old_state) ||
4378 | IS_V1_ISAKMP_SA_ESTABLISHED(old_state))) {
4379 | /* |
4380 | * If this connection instance was previously serving an
4381 | * established SA that is expected to revive, don't delete it.
4382 | */ |
4383 | dbg("connection instance %s with serial "PRI_CO" is being revived, skipping delete-unused",
4384 | c->name, pri_co(c->serialno));
4385 | return; |
4386 | } |
4387 | |
4388 | /* find the first state still using this connection */
4389 | struct state *st = state_by_connection(c, NULL, NULL, __func__);
4390 | if (DBGP(DBG_BASE)) {
4391 | /* |
4392 | * Cross check that the state DB has been kept |
4393 | * up-to-date. |
4394 | */ |
4395 | struct state *dst = NULL;
4396 | FOR_EACH_STATE_NEW2OLD(dst) {
4397 | if (dst->st_connection == c) { |
4398 | break; |
4399 | } |
4400 | } |
4401 | /* found a state, may not be the same */ |
4402 | pexpect((dst == NULL) == (st == NULL));
4403 | st = dst; /* let the truth be free */ |
4404 | } |
4405 | |
4406 | if (st != NULL) {
4407 | dbg("connection instance %s in use by #%lu, skipping delete-unused",
4408 | c->name, st->st_serialno);
4409 | return; |
4410 | } |
4411 | |
4412 | dbg("connection instance %s is not being used, deleting", c->name);
4413 | /* XXX: something better? */ |
4414 | close_any(&c->logger->global_whackfd);
4415 | c->logger->global_whackfd = fd_dup(whackfd, HERE);
4416 | delete_connection(&c, false);
4417 | } |
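/*
 * Added annotation (not part of connections.c): callers hand their pointer
 * over -- *cp is cleared immediately, so after the call the caller must not
 * use it again (update_state_connection() below passes &old this way).  The
 * instance is actually deleted only when every guard above falls through:
 * it is a CK_INSTANCE, it is not pending, it is not an up'ed connection
 * whose established IKE/ISAKMP SA is expected to revive, and no state still
 * references it.
 */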
4418 | |
4419 | /* |
4420 | * Every time a state's connection is changed, the following need to happen: |
4421 | * |
4422 | * - update the connection->state hash table |
4423 | * |
4424 | * - discard the old connection when not in use |
4425 | */ |
4426 | void update_state_connection(struct state *st, struct connection *new) |
4427 | { |
4428 | struct connection *old = st->st_connection; |
4429 | passert(old != NULL);
4430 | passert(new != NULL);
4431 | |
4432 | if (old != new) { |
4433 | st->st_connection = new; |
4434 | st->st_v1_peer_alt_id = false; /* must be rechecked against new 'that' */
4435 | rehash_state_connection(st); |
4436 | if (old != NULL) {
4437 | connection_delete_unused_instance(&old, st, |
4438 | st->st_logger->global_whackfd); |
4439 | } |
4440 | } |
4441 | } |
4442 | |
4443 | /* |
4444 | * A template connection's eroute can be eclipsed by |
4445 | * either a %hold or an eroute for an instance iff |
4446 | * the template is a /32 -> /32. This requires some special casing. |
4447 | */ |
4448 | long eclipse_count = 0; |
4449 | |
4450 | struct connection *eclipsed(const struct connection *c, struct spd_route **esrp /*OUT*/) |
4451 | { |
4452 | /* |
4453 | * This function was changed in freeswan 2.02 and since |
4454 | * then has never worked because it always returned NULL. |
4455 | * It should be caught by the testing/pluto/co-terminal test cases. |
4456 | * ??? DHR doesn't know how much of this is true. |
4457 | */ |
4458 | |
4459 | /* ??? this logic seems broken: it doesn't try all spd_routes of c */ |
4460 | |
4461 | /* XXX This logic also predates support for protoports, which isn't handled below */ |
4462 | |
4463 | struct connection_query cq = { .where = HERE, .c = NULL, };
4464 | while (new2old_connection(&cq)) { |
4465 | struct connection *ue = cq.c; |
4466 | |
4467 | for (struct spd_route *srue = &ue->spd; srue != NULL; srue = srue->spd_next) {
4468 | for (const struct spd_route *src = &c->spd; src != NULL; src = src->spd_next) {
4469 | if (srue->routing == RT_ROUTED_ECLIPSED && |
4470 | selector_range_eq_selector_range(src->this.client, srue->this.client) && |
4471 | selector_range_eq_selector_range(src->that.client, srue->that.client)) { |
4472 | dbg("%s eclipsed %s", c->name, ue->name);
4473 | *esrp = srue; |
4474 | return ue; |
4475 | } |
4476 | } |
4477 | } |
4478 | } |
4479 | *esrp = NULL;
4480 | return NULL;
4481 | } |
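/*
 * Added annotation (not part of connections.c), with hypothetical
 * addresses: a /32<->/32 template whose spd_route sits in
 * RT_ROUTED_ECLIPSED, say 192.0.2.1/32 <-> 198.51.100.1/32, is what
 * eclipsed(c, &esrp) returns when some spd_route of c covers exactly the
 * same selector ranges; *esrp is then pointed at that eclipsed spd_route.
 */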
4482 | |
4483 | /* |
4484 | * This supports certificates whose SAN uses a wildcard, e.g. a SAN
4485 | * containing DNS:*.vpnservice.com where our leftid=*.vpnservice.com.
4486 | */ |
4487 | static bool idr_wildmatch(const struct end *this, const struct id *idr, struct logger *logger)
4488 | { |
4489 | /* check if received IDr is a valid SAN of our cert */ |
4490 | /* cert_VerifySubjectAltName, if called, will [debug]log any errors */ |
4491 | /* XXX: calling cert_VerifySubjectAltName with ID_DER_ASN1_DN futile? */ |
4492 | /* ??? if cert matches we don't actually do any further ID matching, wildcard or not */ |
4493 | if (this->cert.nss_cert != NULL &&
4494 | (idr->kind == ID_FQDN || idr->kind == ID_DER_ASN1_DN)) { |
4495 | diag_t d = cert_verify_subject_alt_name(this->cert.nss_cert, idr); |
4496 | if (d == NULL) {
4497 | return true;
4498 | } |
4499 | llog_diag(RC_LOG_SERIOUS, logger, &d, "%s", ""); |
4500 | } |
4501 | |
4502 | const struct id *wild = &this->id; |
4503 | |
4504 | /* if not both ID_FQDN, fall back to same_id (no wildcarding possible) */ |
4505 | if (idr->kind != ID_FQDN || wild->kind != ID_FQDN) |
4506 | return same_id(wild, idr); |
4507 | |
4508 | size_t wl = wild->name.len; |
4509 | const char *wp = (const char *) wild->name.ptr; |
4510 | |
4511 | /* if wild has no *, fall back to same_id (no wildcard present) */ |
4512 | if (wl == 0 || wp[0] != '*') |
4513 | return same_id(wild, idr); |
4514 | |
4515 | while (wp[wl - 1] == '.') |
4516 | wl--; /* strip trailing dot */ |
4517 | |
4518 | size_t il = idr->name.len; |
4519 | const char *ip = (const char *) idr->name.ptr; |
4520 | while (il > 0 && ip[il - 1] == '.') |
4521 | il--; /* strip trailing dot */ |
4522 | |
4523 | /* |
4524 | * ??? should we require that the * match only whole components? |
4525 | * wl-1 == il || // total match |
4526 | * wl > 1 && wp[1] == '.' || // wild included leading "." |
4527 | * ip[il-(wl-1) - 1] == '.' // match preceded by "." |
4528 | */ |
4529 | |
4530 | return wl-1 <= il && strncaseeq(&wp[1], &ip[il-(wl-1)], wl-1);
4531 | } |
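/*
 * Added annotation (not part of connections.c): a standalone sketch of the
 * FQDN suffix match performed above once both IDs are ID_FQDN and the local
 * ID starts with '*'.  It works on plain C strings instead of struct id
 * chunks, uses strcasecmp() as a stand-in for the non-wildcard same_id()
 * fallback, and the sample names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strcasecmp()/strncasecmp(), which strncaseeq() wraps */

static bool fqdn_wildmatch(const char *wild, const char *idr)
{
	size_t wl = strlen(wild);
	size_t il = strlen(idr);

	/* no leading '*': fall back to a plain case-insensitive comparison */
	if (wl == 0 || wild[0] != '*')
		return strcasecmp(wild, idr) == 0;

	while (wl > 1 && wild[wl - 1] == '.')
		wl--;	/* strip trailing dot(s) */
	while (il > 0 && idr[il - 1] == '.')
		il--;	/* strip trailing dot(s) */

	/* everything after the '*' must match the tail of idr */
	return wl - 1 <= il &&
	       strncasecmp(&wild[1], &idr[il - (wl - 1)], wl - 1) == 0;
}

int main(void)
{
	printf("%d\n", fqdn_wildmatch("*.vpnservice.com", "gw.vpnservice.com"));	/* 1 */
	printf("%d\n", fqdn_wildmatch("*.vpnservice.com", "example.org"));		/* 0 */
	return 0;
}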
4532 | |
4533 | /* |
4534 | * sa priority and type should really go into kernel_sa |
4535 | * |
4536 | * Danger! While the priority used by the kernel is lowest-wins, this
4537 | * code computes the reverse, only to then subtract that from a
4538 | * magic constant.
4539 | */ |
4540 | uint32_t calculate_sa_prio(const struct connection *c, bool oe_shunt)
4541 | { |
4542 | connection_buf cib; |
4543 | |
4544 | if (c->sa_priority != 0) { |
4545 | dbg("priority calculation of connection "PRI_CONNECTION" overruled by connection specification of %"PRIu32" (%#"PRIx32")",
4546 | pri_connection(c, &cib), c->sa_priority, c->sa_priority);
4547 | return c->sa_priority; |
4548 | } |
4549 | |
4550 | if (LIN(POLICY_GROUP, c->policy)) {
4551 | dbg("priority calculation of connection "PRI_CONNECTION" skipped - group template does not install SPDs",
4552 | pri_connection(c, &cib));
4553 | return 0; |
4554 | } |
4555 | |
4556 | /* XXX: assume unsigned >= 32-bits */ |
4557 | passert(sizeof(unsigned) >= sizeof(uint32_t));
4558 | |
4559 | unsigned portsw = /* max 2 (2 bits) */ |
4560 | (c->spd.this.port == 0 ? 0 : 1) + |
4561 | (c->spd.that.port == 0 ? 0 : 1); |
4562 | |
4563 | unsigned protow = c->spd.this.protocol == 0 ? 0 : 1; /* (1 bit) */ |
4564 | |
4565 | |
4566 | /* |
4567 | * For transport mode or /32 to /32, the client mask bits are |
4568 | * set based on the host_addr parameters. |
4569 | */ |
4570 | unsigned srcw, dstw; /* each max 128 (8 bits) */ |
4571 | srcw = c->spd.this.client.maskbits; |
4572 | dstw = c->spd.that.client.maskbits; |
4573 | /* if opportunistic, override the template destination mask with /32 or /128 */ |
4574 | if (oe_shunt) { |
4575 | dstw = (addrtypeof(&c->spd.that.host_addr) == AF_INET) ? 32 : 128;
4576 | } |
4577 | |
4578 | /* |
4579 | * "Ensure an instance of a template/OE-group always has |
4580 | * preference." |
4581 | * |
4582 | * Except, at this point, the polarity is reversed so the |
4583 | * below gives CK_INSTANCE lower priority. Get around this |
4584 | * for sec-labels by making the decrement bigger (except that |
4585 | * is overflowing into the DSTW bits). |
4586 | * |
4587 | * Should fix the math, but that affects all tests. |
4588 | */ |
4589 | unsigned instw = (c->kind == CK_INSTANCE && c->spd.this.sec_label.len > 0 ? 2u : |
4590 | c->kind == CK_INSTANCE ? 0u : |
4591 | 1u); |
4592 | |
4593 | unsigned pmax; |
4594 | if (LIN(POLICY_GROUPINSTANCE, c->policy)) {
4595 | if (LIN(POLICY_AUTH_NULL, c->policy)) {
4596 | pmax = PLUTO_SPD_OPPO_ANON_MAX;
4597 | } else {
4598 | pmax = PLUTO_SPD_OPPO_MAX;
4599 | }
4600 | } else {
4601 | pmax = PLUTO_SPD_STATIC_MAX;
4602 | } |
4603 | |
4604 | unsigned prio_hi = (portsw << 18 | protow << 17 | srcw << 9 | dstw << 1 | instw); |
4605 | unsigned prio_lo = pmax - prio_hi; |
4606 | |
4607 | dbg("priority calculation of connection "PRI_CONNECTION" is %u-%u=%u (%#x) portsw=%u protow=%u, srcw=%u dstw=%u instw=%u",
4608 | pri_connection(c, &cib), pmax, prio_hi, prio_lo, prio_lo,
4609 | portsw, protow, srcw, dstw, instw);
4610 | return prio_lo; |
4611 | } |
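/*
 * Added annotation (not part of connections.c): the bit packing and final
 * subtraction above, worked through for one hypothetical static conn with
 * no port or protocol selectors, /24 subnets on both ends, and kind other
 * than CK_INSTANCE (so instw = 1).
 */
#include <stdio.h>

int main(void)
{
	unsigned portsw = 0, protow = 0;	/* no port/protocol selectors */
	unsigned srcw = 24, dstw = 24;		/* client selector mask bits */
	unsigned instw = 1;			/* not a CK_INSTANCE */
	unsigned pmax = 2u * (1u << 20) - 1u;	/* PLUTO_SPD_STATIC_MAX */

	unsigned prio_hi = portsw << 18 | protow << 17 | srcw << 9 | dstw << 1 | instw;
	unsigned prio_lo = pmax - prio_hi;

	/* prints "2097151 - 12337 = 2084814" */
	printf("%u - %u = %u\n", pmax, prio_hi, prio_lo);
	return 0;
}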
4612 | |
4613 | /* |
4614 | * If the connection contains a newer SA, return it. |
4615 | */ |
4616 | so_serial_t get_newer_sa_from_connection(struct state *st) |
4617 | { |
4618 | struct connection *c = st->st_connection; |
4619 | so_serial_t newest; |
4620 | |
4621 | if (IS_IKE_SA(st)) {
4622 | newest = c->newest_ike_sa; |
4623 | dbg("picked newest_ike_sa #%lu for #%lu",
4624 | newest, st->st_serialno);
4625 | } else { |
4626 | newest = c->newest_ipsec_sa; |
4627 | dbg("picked newest_ipsec_sa #%lu for #%lu",
4628 | newest, st->st_serialno);
4629 | } |
4630 | |
4631 | if (newest != SOS_NOBODY && newest != st->st_serialno) {
4632 | return newest; |
4633 | } else { |
4634 | return SOS_NOBODY;
4635 | } |
4636 | } |
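/*
 * Added annotation (not part of connections.c), serial numbers
 * hypothetical: for a Child SA #10 whose connection records
 * newest_ipsec_sa #12, this returns #12; if #10 is itself the recorded
 * newest, or nothing is recorded, it returns SOS_NOBODY.
 */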
4637 | |
4638 | /* check that the peers' IDs match */
4639 | bool same_peer_ids(const struct connection *c, const struct connection *d,
4640 | const struct id *peer_id) |
4641 | { |
4642 | return same_id(&c->spd.this.id, &d->spd.this.id) && |
4643 | same_id(peer_id == NULL ? &c->spd.that.id : peer_id,
4644 | &d->spd.that.id); |
4645 | } |