Annotation of embedaddon/strongswan/src/libcharon/sa/ike_sa_manager.c, revision 1.1.1.2
1.1 misho 1: /*
2: * Copyright (C) 2005-2011 Martin Willi
3: * Copyright (C) 2011 revosec AG
4: *
1.1.1.2 ! misho 5: * Copyright (C) 2008-2021 Tobias Brunner
1.1 misho 6: * Copyright (C) 2005 Jan Hutter
7: * HSR Hochschule fuer Technik Rapperswil
8: *
9: * This program is free software; you can redistribute it and/or modify it
10: * under the terms of the GNU General Public License as published by the
11: * Free Software Foundation; either version 2 of the License, or (at your
12: * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13: *
14: * This program is distributed in the hope that it will be useful, but
15: * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16: * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17: * for more details.
18: */
19:
20: #include <string.h>
21: #include <inttypes.h>
22:
23: #include "ike_sa_manager.h"
24:
25: #include <daemon.h>
26: #include <sa/ike_sa_id.h>
27: #include <bus/bus.h>
28: #include <threading/thread.h>
29: #include <threading/condvar.h>
30: #include <threading/mutex.h>
31: #include <threading/rwlock.h>
1.1.1.2 ! misho 32: #include <collections/array.h>
1.1 misho 33: #include <collections/linked_list.h>
34: #include <crypto/hashers/hasher.h>
35: #include <processing/jobs/delete_ike_sa_job.h>
36:
37: /* the default size of the hash table (MUST be a power of 2) */
38: #define DEFAULT_HASHTABLE_SIZE 1
39:
40: /* the maximum size of the hash table (MUST be a power of 2) */
41: #define MAX_HASHTABLE_SIZE (1 << 30)
42:
43: /* the default number of segments (MUST be a power of 2) */
44: #define DEFAULT_SEGMENT_COUNT 1
45:
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Whether threads newly trying to access this SA are driven out
	 * (wait_for_entry() refuses them immediately).
	 */
	bool driveout_new_threads;

	/**
	 * Whether threads already waiting for this SA are driven out
	 * (wait_for_entry() returns FALSE after waking them).
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 */
	uint32_t processing;
};
120:
/**
 * Implementation of entry_t.destroy.
 *
 * Frees the entry and everything it owns, including the contained IKE_SA.
 */
static status_t entry_destroy(entry_t *this)
{
	/* also destroy IKE SA, the entry owns it */
	this->ike_sa->destroy(this->ike_sa);
	this->ike_sa_id->destroy(this->ike_sa_id);
	chunk_free(&this->init_hash);
	/* other/my_id/other_id are only set for registered/established SAs */
	DESTROY_IF(this->other);
	DESTROY_IF(this->my_id);
	DESTROY_IF(this->other_id);
	this->condvar->destroy(this->condvar);
	free(this);
	return SUCCESS;
}
137:
138: /**
139: * Creates a new entry for the ike_sa_t list.
140: */
141: static entry_t *entry_create()
142: {
143: entry_t *this;
144:
145: INIT(this,
146: .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
147: .processing = -1,
148: );
149:
150: return this;
151: }
152:
153: /**
154: * Function that matches entry_t objects by ike_sa_id_t.
155: */
156: static bool entry_match_by_id(entry_t *entry, void *arg)
157: {
158: ike_sa_id_t *id = arg;
159:
160: if (id->equals(id, entry->ike_sa_id))
161: {
162: return TRUE;
163: }
164: if ((id->get_responder_spi(id) == 0 ||
165: entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
166: (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
167: id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
168: id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
169: {
170: /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
171: return TRUE;
172: }
173: return FALSE;
174: }
175:
176: /**
177: * Function that matches entry_t objects by ike_sa_t pointers.
178: */
179: static bool entry_match_by_sa(entry_t *entry, void *ike_sa)
180: {
181: return entry->ike_sa == ike_sa;
182: }
183:
184: /**
185: * Hash function for ike_sa_id_t objects.
186: */
187: static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
188: {
189: /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
190: * locally unique, so we use our randomly allocated SPI whether we are
191: * initiator or responder to ensure a good distribution. The latter is not
192: * possible for IKEv1 as we don't know whether we are original initiator or
193: * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
194: * SPIs (Cookies) to be allocated near random (we allocate them randomly
195: * anyway) it seems safe to always use the initiator SPI. */
196: if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
197: ike_sa_id->is_initiator(ike_sa_id))
198: {
199: return ike_sa_id->get_initiator_spi(ike_sa_id);
200: }
201: return ike_sa_id->get_responder_spi(ike_sa_id);
202: }
203:
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address (owned, cloned on insert) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
219:
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
228:
typedef struct connected_peers_t connected_peers_t;

/**
 * Bookkeeping for established IKE_SAs between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
244:
/**
 * Destroys a connected_peers_t object.
 *
 * NOTE(review): the sas list is destroyed without destroying the contained
 * ike_sa_id_t objects - callers appear to empty it first (see
 * remove_connected_peers(), which destroys each id on removal).
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
252:
253: /**
254: * Function that matches connected_peers_t objects by the given ids.
255: */
256: static inline bool connected_peers_match(connected_peers_t *connected_peers,
257: identification_t *my_id, identification_t *other_id,
258: int family)
259: {
260: return my_id->equals(my_id, connected_peers->my_id) &&
261: other_id->equals(other_id, connected_peers->other_id) &&
262: (!family || family == connected_peers->family);
263: }
264:
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	uint64_t our_spi;
};
274:
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (exclusive locking only).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;
};
284:
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
298:
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
312:
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Total number of IKE_SAs registered with IKE_SA manager.
	 */
	refcount_t total_sa_count;

	/**
	 * Hash table with connected_peers_t objects.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "init hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * Configs for which an SA is currently being checked out.
	 */
	array_t *config_checkouts;

	/**
	 * Mutex to protect access to configs.
	 */
	mutex_t *config_mutex;

	/**
	 * Condvar to indicate changes in checkout configs.
	 */
	condvar_t *config_condvar;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * Mask applied to local SPIs before mixing in the label
	 */
	uint64_t spi_mask;

	/**
	 * Label applied to local SPIs
	 */
	uint64_t spi_label;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means no limit)
	 */
	u_int ikesa_limit;
};
452:
453: /**
454: * Acquire a lock to access the segment of the table row with the given index.
455: * It also works with the segment index directly.
456: */
457: static inline void lock_single_segment(private_ike_sa_manager_t *this,
458: u_int index)
459: {
460: mutex_t *lock = this->segments[index & this->segment_mask].mutex;
461: lock->lock(lock);
462: }
463:
464: /**
465: * Release the lock required to access the segment of the table row with the given index.
466: * It also works with the segment index directly.
467: */
468: static inline void unlock_single_segment(private_ike_sa_manager_t *this,
469: u_int index)
470: {
471: mutex_t *lock = this->segments[index & this->segment_mask].mutex;
472: lock->unlock(lock);
473: }
474:
475: /**
476: * Lock all segments
477: */
478: static void lock_all_segments(private_ike_sa_manager_t *this)
479: {
480: u_int i;
481:
482: for (i = 0; i < this->segment_count; i++)
483: {
484: this->segments[i].mutex->lock(this->segments[i].mutex);
485: }
486: }
487:
488: /**
489: * Unlock all segments
490: */
491: static void unlock_all_segments(private_ike_sa_manager_t *this)
492: {
493: u_int i;
494:
495: for (i = 0; i < this->segment_count; i++)
496: {
497: this->segments[i].mutex->unlock(this->segments[i].mutex);
498: }
499: }
500:
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when we move on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item (non-NULL implies the segment is locked)
	 */
	table_item_t *current;

	/**
	 * previous table item, needed by remove_entry_at() to unlink current
	 */
	table_item_t *prev;
};
543:
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, va_list args)
{
	entry_t **entry;
	u_int *segment;

	VA_ARGS_VGET(args, entry, segment);

	if (this->entry)
	{
		/* wake threads waiting on the previously enumerated entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	/* rows are assigned to segments round-robin: segment s covers rows
	 * s, s + segment_count, s + 2*segment_count, ... so we walk one
	 * segment's rows completely before moving to the next segment */
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				/* continue in the current row's overflow list */
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: take the segment lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				/* the segment stays locked while the caller holds the
				 * entry; it is released when we step past the row or in
				 * enumerator_destroy() */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
585:
586: METHOD(enumerator_t, enumerator_destroy, void,
587: private_enumerator_t *this)
588: {
589: if (this->entry)
590: {
591: this->entry->condvar->signal(this->entry->condvar);
592: }
593: if (this->current)
594: {
595: unlock_single_segment(this->manager, this->segment);
596: }
597: free(this);
598: }
599:
600: /**
601: * Creates an enumerator to enumerate the entries in the hash table.
602: */
603: static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
604: {
605: private_enumerator_t *enumerator;
606:
607: INIT(enumerator,
608: .enumerator = {
609: .enumerate = enumerator_enumerate_default,
610: .venumerate = _enumerate,
611: .destroy = _enumerator_destroy,
612: },
613: .manager = this,
614: );
615: return &enumerator->enumerator;
616: }
617:
618: /**
619: * Put an entry into the hash table.
620: * Note: The caller has to unlock the returned segment.
621: */
622: static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
623: {
624: table_item_t *current, *item;
625: u_int row, segment;
626:
627: INIT(item,
628: .value = entry,
629: );
630:
631: row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
632: segment = row & this->segment_mask;
633:
634: lock_single_segment(this, segment);
635: current = this->ike_sa_table[row];
636: if (current)
637: { /* insert at the front of current bucket */
638: item->next = current;
639: }
640: this->ike_sa_table[row] = item;
641: ref_get(&this->total_sa_count);
642: return segment;
643: }
644:
645: /**
646: * Remove an entry from the hash table.
647: * Note: The caller MUST have a lock on the segment of this entry.
648: */
649: static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
650: {
651: table_item_t *item, *prev = NULL;
652: u_int row;
653:
654: row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
655: item = this->ike_sa_table[row];
656: while (item)
657: {
658: if (item->value == entry)
659: {
660: if (prev)
661: {
662: prev->next = item->next;
663: }
664: else
665: {
666: this->ike_sa_table[row] = item->next;
667: }
668: ignore_result(ref_put(&this->total_sa_count));
669: free(item);
670: break;
671: }
672: prev = item;
673: item = item->next;
674: }
675: }
676:
/**
 * Remove the entry at the current enumerator position.
 *
 * Must only be called while the enumerator holds an entry (and thus the
 * segment lock).
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		ignore_result(ref_put(&this->manager->total_sa_count));
		/* step back to prev so the next enumerate() continues with
		 * current->next */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			/* removed the head of the row: unlink it and release the
			 * segment lock; enumerate() re-locks when it re-enters the
			 * row because this->current is now NULL */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
702:
703: /**
704: * Find an entry using the provided match function to compare the entries for
705: * equality.
706: */
707: static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
708: ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
709: bool (*match)(entry_t*,void*), void *param)
710: {
711: table_item_t *item;
712: u_int row, seg;
713:
714: row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
715: seg = row & this->segment_mask;
716:
717: lock_single_segment(this, seg);
718: item = this->ike_sa_table[row];
719: while (item)
720: {
721: if (match(item->value, param))
722: {
723: *entry = item->value;
724: *segment = seg;
725: /* the locked segment has to be unlocked by the caller */
726: return SUCCESS;
727: }
728: item = item->next;
729: }
730: unlock_single_segment(this, seg);
731: return NOT_FOUND;
732: }
733:
/**
 * Find an entry by ike_sa_id_t, matching exactly or by initiator SPI for
 * SAs whose responder SPI is not yet known (see entry_match_by_id()).
 *
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_id, ike_sa_id);
}
744:
/**
 * Find an entry by IKE_SA pointer; ike_sa_id is only used to locate the
 * hash table row.
 *
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_sa, ike_sa);
}
755:
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * below releases that mutex while blocked and re-acquires it on wakeup.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
785:
/**
 * Put a half-open SA into the hash table, keyed by the remote address.
 *
 * Increments the per-address and global half-open counters (and the
 * responder-specific ones if we are the responder of this SA).
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter object for this address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this address, insert a new counter at the
		 * head of the row */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
837:
/**
 * Remove a half-open SA from the hash table, decrementing the counters
 * incremented by put_half_open() and destroying the per-address counter
 * object once it drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this address, unlink and free
				 * the counter object */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
889:
1.1.1.2 ! misho 890: /**
! 891: * Create an entry and put it into the hash table.
! 892: * Note: The caller has to unlock the segment.
! 893: */
! 894: static u_int create_and_put_entry(private_ike_sa_manager_t *this,
! 895: ike_sa_t *ike_sa, entry_t **entry)
! 896: {
! 897: ike_sa_id_t *ike_sa_id = ike_sa->get_id(ike_sa);
! 898: host_t *other = ike_sa->get_other_host(ike_sa);
! 899:
! 900: *entry = entry_create();
! 901: (*entry)->ike_sa_id = ike_sa_id->clone(ike_sa_id);
! 902: (*entry)->ike_sa = ike_sa;
! 903:
! 904: if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
! 905: {
! 906: (*entry)->half_open = TRUE;
! 907: (*entry)->other = other->clone(other);
! 908: put_half_open(this, *entry);
! 909: }
! 910: return put_entry(this, *entry);
! 911: }
! 912:
/**
 * Callback to match ike_sa_id_t objects by equality (used with
 * linked_list_t.find_first()).
 */
CALLBACK(id_matches, bool,
	ike_sa_id_t *a, va_list args)
{
	ike_sa_id_t *b;

	VA_ARGS_VGET(args, b);
	return a->equals(a, b);
}
921:
/**
 * Put an SA between two peers into the hash table, for duplicate checking.
 *
 * If the SA's id is already registered for this identity pair, nothing is
 * added (the insert is idempotent per ike_sa_id).
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	/* rows are keyed by the combined hash of both identity encodings */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
												 id_matches, NULL, entry->ike_sa_id))
			{	/* this SA is already registered for the pair, done */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these identities, create the pair object */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
979:
/**
 * Remove an SA between two peers from the hash table, destroying the pair
 * object once its list of SAs becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	/* same row derivation as in put_connected_peers() */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove and destroy the cloned ike_sa_id of this SA */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* last SA between these identities, drop the pair object */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
1043:
1044: /**
1045: * Get a random SPI for new IKE_SAs
1046: */
1047: static uint64_t get_spi(private_ike_sa_manager_t *this)
1048: {
1049: uint64_t spi;
1050:
1051: this->spi_lock->read_lock(this->spi_lock);
1052: if (this->spi_cb.cb)
1053: {
1054: spi = this->spi_cb.cb(this->spi_cb.data);
1055: }
1056: else if (!this->rng ||
1057: !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
1058: {
1059: spi = 0;
1060: }
1061: this->spi_lock->unlock(this->spi_lock);
1062:
1063: if (spi)
1064: {
1065: spi = (spi & ~this->spi_mask) | this->spi_label;
1066: }
1067: return spi;
1068: }
1069:
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * The intermediate allocate_hash() calls (NULL result) only feed data into
 * the hasher; the final call allocates the resulting digest into *hash.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1110:
1111: /**
1112: * Check if we already have created an IKE_SA based on the initial IKE message
1113: * with the given hash.
1114: * If not the hash is stored, the hash data is not(!) cloned.
1115: *
1116: * Also, the local SPI is returned. In case of a retransmit this is already
1117: * stored together with the hash, otherwise it is newly allocated and should
1118: * be used to create the IKE_SA.
1119: *
1120: * @returns ALREADY_DONE if the message with the given hash has been seen before
1121: * NOT_FOUND if the message hash was not found
1122: * FAILED if the SPI allocation failed
1123: */
1124: static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1125: chunk_t init_hash, uint64_t *our_spi)
1126: {
1127: table_item_t *item;
1128: u_int row, segment;
1129: mutex_t *mutex;
1130: init_hash_t *init;
1131: uint64_t spi;
1132:
1133: row = chunk_hash(init_hash) & this->table_mask;
1134: segment = row & this->segment_mask;
1135: mutex = this->init_hashes_segments[segment].mutex;
1136: mutex->lock(mutex);
1137: item = this->init_hashes_table[row];
1138: while (item)
1139: {
1140: init_hash_t *current = item->value;
1141:
1142: if (chunk_equals(init_hash, current->hash))
1143: {
1144: *our_spi = current->our_spi;
1145: mutex->unlock(mutex);
1146: return ALREADY_DONE;
1147: }
1148: item = item->next;
1149: }
1150:
1151: spi = get_spi(this);
1152: if (!spi)
1153: {
1154: return FAILED;
1155: }
1156:
1157: INIT(init,
1158: .hash = {
1159: .len = init_hash.len,
1160: .ptr = init_hash.ptr,
1161: },
1162: .our_spi = spi,
1163: );
1164: INIT(item,
1165: .value = init,
1166: .next = this->init_hashes_table[row],
1167: );
1168: this->init_hashes_table[row] = item;
1169: *our_spi = init->our_spi;
1170: mutex->unlock(mutex);
1171: return NOT_FOUND;
1172: }
1173:
1174: /**
1175: * Remove the hash of an initial IKE message from the cache.
1176: */
1177: static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1178: {
1179: table_item_t *item, *prev = NULL;
1180: u_int row, segment;
1181: mutex_t *mutex;
1182:
1183: row = chunk_hash(init_hash) & this->table_mask;
1184: segment = row & this->segment_mask;
1185: mutex = this->init_hashes_segments[segment].mutex;
1186: mutex->lock(mutex);
1187: item = this->init_hashes_table[row];
1188: while (item)
1189: {
1190: init_hash_t *current = item->value;
1191:
1192: if (chunk_equals(init_hash, current->hash))
1193: {
1194: if (prev)
1195: {
1196: prev->next = item->next;
1197: }
1198: else
1199: {
1200: this->init_hashes_table[row] = item->next;
1201: }
1202: free(current);
1203: free(item);
1204: break;
1205: }
1206: prev = item;
1207: item = item->next;
1208: }
1209: mutex->unlock(mutex);
1210: }
1211:
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* blocks until no other thread holds the SA (or we are driven out) */
		if (wait_for_entry(this, entry, segment))
		{
			/* mark the SA as checked out by the current thread */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA (or NULL on failure) on the bus */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1243:
/* Create a new IKE_SA of the given version without registering it in the
 * manager's hash table (see checkout_new() for that).  Allocates a fresh
 * SPI which becomes the initiator or responder SPI depending on our role;
 * returns NULL if SPI allocation or ike_sa_create() fails. */
1.1.1.2 ! misho 1244: METHOD(ike_sa_manager_t, create_new, ike_sa_t*,
1.1 misho 1245: 	private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1246: {
1247: 	ike_sa_id_t *ike_sa_id;
1248: 	ike_sa_t *ike_sa;
1249: 	uint8_t ike_version;
1250: 	uint64_t spi;
1251:
1252: 	ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1253:
1254: 	spi = get_spi(this);
1255: 	if (!spi)
1256: 	{
1257: 		DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1258: 		return NULL;
1259: 	}
1260:
/* the allocated SPI goes into the slot matching our role; the peer's
 * SPI is not known yet and stays zero */
1261: 	if (initiator)
1262: 	{
1263: 		ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1264: 	}
1265: 	else
1266: 	{
1267: 		ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1268: 	}
1269: 	ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1270: 	ike_sa_id->destroy(ike_sa_id);
1271:
1272: 	if (ike_sa)
1273: 	{
1274: 		DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1275: 			 ike_sa->get_unique_id(ike_sa));
1276: 	}
1277: 	return ike_sa;
1278: }
1279:
/* Register a not-yet-managed IKE_SA (e.g. one returned by create_new())
 * in the manager and immediately mark it as checked out by the calling
 * thread.  create_and_put_entry() returns with the entry's segment locked,
 * so it is unlocked here after setting checked_out. */
1.1.1.2 ! misho 1280: METHOD(ike_sa_manager_t, checkout_new, void,
        ! 1281: 	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
        ! 1282: {
        ! 1283: 	u_int segment;
        ! 1284: 	entry_t *entry;
        ! 1285:
        ! 1286: 	segment = create_and_put_entry(this, ike_sa, &entry);
        ! 1287: 	entry->checked_out = thread_current();
        ! 1288: 	unlock_single_segment(this, segment);
        ! 1289: }
! 1290:
1.1 misho 1291: /**
1292:  * Get the message ID or message hash to detect early retransmissions
/* For IKEv2 (and most IKEv1 exchanges) the plain message ID is returned.
 * For IKEv1 Phase 1 (MID 0) and Quick Mode a hash over the raw packet
 * data is used instead, because those exchanges reuse one message ID for
 * several distinct messages.  The result is stored in entry->processing
 * and compared against incoming requests in checkout_by_message(). */
1293:  */
1294: static uint32_t get_message_id_or_hash(message_t *message)
1295: {
1296: 	if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1297: 	{
1298: 		/* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1299: 		 * Mode, where all three messages use the same message ID */
1300: 		if (message->get_message_id(message) == 0 ||
1301: 			message->get_exchange_type(message) == QUICK_MODE)
1302: 		{
1303: 			return chunk_hash(message->get_packet_data(message));
1304: 		}
1305: 	}
1306: 	return message->get_message_id(message);
1307: }
1308:
/* Check out the IKE_SA an inbound message belongs to.  For initial
 * IKE_SA_INIT / ID_PROT / AGGRESSIVE requests (responder SPI 0, MID 0) a
 * hash over the message is cached via check_and_put_init_hash() so that
 * retransmits of the same init message map to the SA created for the
 * first copy instead of spawning duplicates.  Non-initial messages are
 * looked up by (cloned, initiator-flag-switched) IKE_SA ID; a message
 * whose ID/hash equals entry->processing is rejected as an early
 * retransmit of a request already being handled. */
1309: METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1310: 	private_ike_sa_manager_t* this, message_t *message)
1311: {
1312: 	u_int segment;
1313: 	entry_t *entry;
1314: 	ike_sa_t *ike_sa = NULL;
1315: 	ike_sa_id_t *id;
1316: 	ike_version_t ike_version;
1317: 	bool is_init = FALSE;
1318:
1319: 	id = message->get_ike_sa_id(message);
1320: 	/* clone the IKE_SA ID so we can modify the initiator flag */
1321: 	id = id->clone(id);
1322: 	id->switch_initiator(id);
1323:
1324: 	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
1325: 		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
1326: 		 be64toh(id->get_initiator_spi(id)),
1327: 		 be64toh(id->get_responder_spi(id)));
1328:
/* detect an initial request: no responder SPI assigned yet and MID 0 */
1329: 	if (id->get_responder_spi(id) == 0 &&
1330: 		message->get_message_id(message) == 0)
1331: 	{
1332: 		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1333: 		{
1334: 			if (message->get_exchange_type(message) == IKE_SA_INIT &&
1335: 				message->get_request(message))
1336: 			{
1337: 				ike_version = IKEV2;
1338: 				is_init = TRUE;
1339: 			}
1340: 		}
1341: 		else
1342: 		{
1343: 			if (message->get_exchange_type(message) == ID_PROT ||
1344: 				message->get_exchange_type(message) == AGGRESSIVE)
1345: 			{
1346: 				ike_version = IKEV1;
1347: 				is_init = TRUE;
1348: 				if (id->is_initiator(id))
1349: 				{	/* not set in IKEv1, switch back before applying to new SA */
1350: 					id->switch_initiator(id);
1351: 				}
1352: 			}
1353: 		}
1354: 	}
1355:
1356: 	if (is_init)
1357: 	{
1358: 		hasher_t *hasher;
1359: 		uint64_t our_spi;
1360: 		chunk_t hash;
1361:
1362: 		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
1363: 		if (!hasher || !get_init_hash(hasher, message, &hash))
1364: 		{
1365: 			DBG1(DBG_MGR, "ignoring message, failed to hash message");
1366: 			DESTROY_IF(hasher);
1367: 			id->destroy(id);
1368: 			goto out;
1369: 		}
1370: 		hasher->destroy(hasher);
1371:
1372: 		/* ensure this is not a retransmit of an already handled init message */
1373: 		switch (check_and_put_init_hash(this, hash, &our_spi))
1374: 		{
1375: 			case NOT_FOUND:
1376: 			{	/* we've not seen this packet yet, create a new IKE_SA */
1377: 				if (!this->ikesa_limit ||
1378: 					this->public.get_count(&this->public) < this->ikesa_limit)
1379: 				{
1380: 					id->set_responder_spi(id, our_spi);
1381: 					ike_sa = ike_sa_create(id, FALSE, ike_version);
1382: 					if (ike_sa)
1383: 					{
/* entry takes ownership of both the cloned id and the init hash */
1384: 						entry = entry_create();
1385: 						entry->ike_sa = ike_sa;
1386: 						entry->ike_sa_id = id;
1387: 						entry->processing = get_message_id_or_hash(message);
1388: 						entry->init_hash = hash;
1389:
1390: 						segment = put_entry(this, entry);
1391: 						entry->checked_out = thread_current();
1392: 						unlock_single_segment(this, segment);
1393:
1394: 						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1395: 							 ike_sa->get_name(ike_sa),
1396: 							 ike_sa->get_unique_id(ike_sa));
1397: 						goto out;
1398: 					}
1399: 					else
1400: 					{
1401: 						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
1402: 					}
1403: 				}
1404: 				else
1405: 				{
1406: 					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
1407: 						 exchange_type_names, message->get_exchange_type(message),
1408: 						 this->ikesa_limit);
1409: 				}
/* creation failed or limit hit: roll back the cached init hash */
1410: 				remove_init_hash(this, hash);
1411: 				chunk_free(&hash);
1412: 				id->destroy(id);
1413: 				goto out;
1414: 			}
1415: 			case FAILED:
1416: 			{	/* we failed to allocate an SPI */
1417: 				chunk_free(&hash);
1418: 				id->destroy(id);
1419: 				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
1420: 				goto out;
1421: 			}
1422: 			case ALREADY_DONE:
1423: 			default:
1424: 				break;
1425: 		}
1426: 		/* it looks like we already handled this init message to some degree */
1427: 		id->set_responder_spi(id, our_spi);
1428: 		chunk_free(&hash);
1429: 	}
1430:
1431: 	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1432: 	{
1433: 		/* only check out if we are not already processing it. */
1434: 		if (entry->processing == get_message_id_or_hash(message))
1435: 		{
1436: 			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1437: 				 entry->processing);
1438: 		}
1439: 		else if (wait_for_entry(this, entry, segment))
1440: 		{
1441: 			ike_sa_id_t *ike_id;
1442:
1443: 			ike_id = entry->ike_sa->get_id(entry->ike_sa);
1444: 			entry->checked_out = thread_current();
1445: 			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
1446: 				message->get_first_payload_type(message) != PLV2_FRAGMENT)
1447: 			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
1448: 				entry->processing = get_message_id_or_hash(message);
1449: 			}
1450: 			if (ike_id->get_responder_spi(ike_id) == 0)
1451: 			{
1452: 				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1453: 			}
1454: 			ike_sa = entry->ike_sa;
1455: 			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1456: 				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1457: 		}
1458: 		unlock_single_segment(this, segment);
1459: 	}
1460: 	else
1461: 	{
1462: 		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
1463: 	}
1464: 	id->destroy(id);
1465:
1466: out:
1467: 	charon->bus->set_sa(charon->bus, ike_sa);
1468: 	if (!ike_sa)
1469: 	{
1470: 		DBG2(DBG_MGR, "IKE_SA checkout not successful");
1471: 	}
1472: 	return ike_sa;
1473: }
1474:
1.1.1.2 ! misho 1475: /**
        ! 1476:  * Data used to track checkouts by config.
/* One instance lives in this->config_checkouts per distinct peer/IKE
 * config while checkout_by_config() calls for it are in flight; freed by
 * the last thread when `threads` drops to zero. */
        ! 1477:  */
        ! 1478: typedef struct {
        ! 1479: 	/** The peer config for which an IKE_SA is being checked out. */
        ! 1480: 	peer_cfg_t *cfg;
        ! 1481: 	/** Number of threads checking out SAs for the same config. */
        ! 1482: 	int threads;
        ! 1483: 	/** A thread is currently creating/finding an SA for this config. */
        ! 1484: 	bool working;
        ! 1485: } config_entry_t;
! 1486:
/* Check out an IKE_SA usable for the given peer config, reusing an
 * existing SA with equal peer and IKE config if reuse is enabled,
 * otherwise creating a new one.  Concurrent checkouts for the same
 * config are serialized via config_mutex/config_condvar and the
 * per-config `working` flag, so two threads cannot race to create two
 * SAs for one config.  SAs in IKE_DELETING/IKE_REKEYED state are
 * skipped during the table scan. */
1.1 misho 1487: METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1488: 	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1489: {
1490: 	enumerator_t *enumerator;
1491: 	entry_t *entry;
1492: 	ike_sa_t *ike_sa = NULL;
1493: 	peer_cfg_t *current_peer;
1494: 	ike_cfg_t *current_ike;
1.1.1.2 ! misho 1495: 	config_entry_t *config_entry, *found = NULL;
1.1 misho 1496: 	u_int segment;
1.1.1.2 ! misho 1497: 	int i;
1.1 misho 1498:
1499: 	DBG2(DBG_MGR, "checkout IKE_SA by config");
1500:
1501: 	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
1502: 	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
1.1.1.2 ! misho 1503: 		ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
        ! 1504: 		if (ike_sa)
        ! 1505: 		{
        ! 1506: 			ike_sa->set_peer_cfg(ike_sa, peer_cfg);
        ! 1507: 			checkout_new(this, ike_sa);
        ! 1508: 		}
        ! 1509: 		}
1.1 misho 1509: 	charon->bus->set_sa(charon->bus, ike_sa);
1510: 		goto out;
1511: 	}
1512:
/* find or create the serialization record for this exact config */
1.1.1.2 ! misho 1513: 	this->config_mutex->lock(this->config_mutex);
        ! 1514: 	for (i = 0; i < array_count(this->config_checkouts); i++)
        ! 1515: 	{
        ! 1516: 		array_get(this->config_checkouts, i, &config_entry);
        ! 1517: 		if (config_entry->cfg->equals(config_entry->cfg, peer_cfg))
        ! 1518: 		{
        ! 1519: 			current_ike = config_entry->cfg->get_ike_cfg(config_entry->cfg);
        ! 1520: 			if (current_ike->equals(current_ike,
        ! 1521: 									peer_cfg->get_ike_cfg(peer_cfg)))
        ! 1522: 			{
        ! 1523: 				found = config_entry;
        ! 1524: 				break;
        ! 1525: 			}
        ! 1526: 		}
        ! 1527: 	}
        ! 1528: 	if (!found)
        ! 1529: 	{
        ! 1530: 		INIT(found,
        ! 1531: 			.cfg = peer_cfg->get_ref(peer_cfg),
        ! 1532: 		);
        ! 1533: 		array_insert_create(&this->config_checkouts, ARRAY_TAIL, found);
        ! 1534: 	}
        ! 1535: 	found->threads++;
/* wait until no other thread is creating/finding an SA for this config */
        ! 1536: 	while (found->working)
        ! 1537: 	{
        ! 1538: 		this->config_condvar->wait(this->config_condvar, this->config_mutex);
        ! 1539: 	}
        ! 1540: 	found->working = TRUE;
        ! 1541: 	this->config_mutex->unlock(this->config_mutex);
        ! 1542:
1.1 misho 1543: 	enumerator = create_table_enumerator(this);
1544: 	while (enumerator->enumerate(enumerator, &entry, &segment))
1545: 	{
1546: 		if (!wait_for_entry(this, entry, segment))
1547: 		{
1548: 			continue;
1549: 		}
1550: 		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
1551: 			entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
1552: 		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
1553: 			entry->condvar->signal(entry->condvar);
1554: 			continue;
1555: 		}
1556:
1557: 		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1558: 		if (current_peer && current_peer->equals(current_peer, peer_cfg))
1559: 		{
1560: 			current_ike = current_peer->get_ike_cfg(current_peer);
1561: 			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1562: 			{
1563: 				entry->checked_out = thread_current();
1564: 				ike_sa = entry->ike_sa;
1.1.1.2 ! misho 1565: 				DBG2(DBG_MGR, "found existing IKE_SA %u with config '%s'",
1.1 misho 1566: 					 ike_sa->get_unique_id(ike_sa),
1567: 					 current_peer->get_name(current_peer));
1568: 				break;
1569: 			}
1570: 		}
1571: 		/* other threads might be waiting for this entry */
1572: 		entry->condvar->signal(entry->condvar);
1573: 	}
1574: 	enumerator->destroy(enumerator);
1575:
1576: 	if (!ike_sa)
1.1.1.2 ! misho 1577: 	{
        ! 1578: 		ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
        ! 1579: 		if (ike_sa)
        ! 1580: 		{
        ! 1581: 			ike_sa->set_peer_cfg(ike_sa, peer_cfg);
        ! 1582: 			checkout_new(this, ike_sa);
        ! 1583: 		}
1.1 misho 1584: 	}
1585: 	charon->bus->set_sa(charon->bus, ike_sa);
1586:
/* release the per-config lockout; last thread removes and frees the record */
1.1.1.2 ! misho 1587: 	this->config_mutex->lock(this->config_mutex);
        ! 1588: 	found->working = FALSE;
        ! 1589: 	found->threads--;
        ! 1590: 	if (!found->threads)
        ! 1591: 	{
        ! 1592: 		for (i = 0; i < array_count(this->config_checkouts); i++)
        ! 1593: 		{
        ! 1594: 			array_get(this->config_checkouts, i, &config_entry);
        ! 1595: 			if (config_entry == found)
        ! 1596: 			{
        ! 1597: 				array_remove(this->config_checkouts, i, NULL);
        ! 1598: 				found->cfg->destroy(found->cfg);
        ! 1599: 				free(found);
        ! 1600: 				break;
        ! 1601: 			}
        ! 1602: 		}
        ! 1603: 	}
        ! 1604: 	this->config_condvar->signal(this->config_condvar);
        ! 1605: 	this->config_mutex->unlock(this->config_mutex);
        ! 1606:
1.1 misho 1607: out:
1608: 	if (!ike_sa)
1609: 	{
1610: 		DBG2(DBG_MGR, "IKE_SA checkout not successful");
1611: 	}
1612: 	return ike_sa;
1613: }
1614:
/* Check out an IKE_SA by its unique (monotonic) numeric ID.  Performs a
 * full table scan, waiting for each entry in turn; entries that do not
 * match are re-signaled so other waiters are not starved. */
1615: METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1616: 	private_ike_sa_manager_t *this, uint32_t id)
1617: {
1618: 	enumerator_t *enumerator;
1619: 	entry_t *entry;
1620: 	ike_sa_t *ike_sa = NULL;
1621: 	u_int segment;
1622:
1623: 	DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1624:
1625: 	enumerator = create_table_enumerator(this);
1626: 	while (enumerator->enumerate(enumerator, &entry, &segment))
1627: 	{
1628: 		if (wait_for_entry(this, entry, segment))
1629: 		{
1630: 			if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1631: 			{
1632: 				ike_sa = entry->ike_sa;
1633: 				entry->checked_out = thread_current();
1634: 				break;
1635: 			}
1636: 			/* other threads might be waiting for this entry */
1637: 			entry->condvar->signal(entry->condvar);
1638: 		}
1639: 	}
1640: 	enumerator->destroy(enumerator);
1641:
1642: 	if (ike_sa)
1643: 	{
1644: 		DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1645: 			 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1646: 	}
1647: 	else
1648: 	{
1649: 		DBG2(DBG_MGR, "IKE_SA checkout not successful");
1650: 	}
1651: 	charon->bus->set_sa(charon->bus, ike_sa);
1652: 	return ike_sa;
1653: }
1654:
/* Check out an IKE_SA by connection name, or — if `child` is TRUE — the
 * IKE_SA owning a CHILD_SA with the given policy name.  Full table scan;
 * the first match wins, non-matching entries are re-signaled. */
1655: METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1656: 	private_ike_sa_manager_t *this, char *name, bool child)
1657: {
1658: 	enumerator_t *enumerator, *children;
1659: 	entry_t *entry;
1660: 	ike_sa_t *ike_sa = NULL;
1661: 	child_sa_t *child_sa;
1662: 	u_int segment;
1663:
1664: 	DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);
1665:
1666: 	enumerator = create_table_enumerator(this);
1667: 	while (enumerator->enumerate(enumerator, &entry, &segment))
1668: 	{
1669: 		if (wait_for_entry(this, entry, segment))
1670: 		{
1671: 			/* look for a child with such a policy name ... */
1672: 			if (child)
1673: 			{
1674: 				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1675: 				while (children->enumerate(children, (void**)&child_sa))
1676: 				{
1677: 					if (streq(child_sa->get_name(child_sa), name))
1678: 					{
1679: 						ike_sa = entry->ike_sa;
1680: 						break;
1681: 					}
1682: 				}
1683: 				children->destroy(children);
1684: 			}
1685: 			else /* ... or for a IKE_SA with such a connection name */
1686: 			{
1687: 				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1688: 				{
1689: 					ike_sa = entry->ike_sa;
1690: 				}
1691: 			}
1692: 			/* got one, return */
1693: 			if (ike_sa)
1694: 			{
1695: 				entry->checked_out = thread_current();
1696: 				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1697: 					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1698: 				break;
1699: 			}
1700: 			/* other threads might be waiting for this entry */
1701: 			entry->condvar->signal(entry->condvar);
1702: 		}
1703: 	}
1704: 	enumerator->destroy(enumerator);
1705:
1706: 	charon->bus->set_sa(charon->bus, ike_sa);
1707:
1708: 	if (!ike_sa)
1709: 	{
1710: 		DBG2(DBG_MGR, "IKE_SA checkout not successful");
1711: 	}
1712: 	return ike_sa;
1713: }
1714:
/* Replace the initiator SPI of an IKE_SA the current thread has checked
 * out (only valid while IKE_CONNECTING and as initiator).  To avoid
 * moving the entry to a different hash row/segment — which would break
 * threads already waiting on it — the new SPI is masked so its low bits
 * (row/segment selector) stay identical to the old SPI's.  Returns FALSE
 * on any precondition failure or during shutdown. */
1715: METHOD(ike_sa_manager_t, new_initiator_spi, bool,
1716: 	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1717: {
1718: 	ike_sa_state_t state;
1719: 	ike_sa_id_t *ike_sa_id;
1720: 	entry_t *entry;
1721: 	u_int segment;
1722: 	uint64_t new_spi, spi;
1723:
1724: 	state = ike_sa->get_state(ike_sa);
1725: 	if (state != IKE_CONNECTING)
1726: 	{
1727: 		DBG1(DBG_MGR, "unable to change initiator SPI for IKE_SA in state "
1728: 			 "%N", ike_sa_state_names, state);
1729: 		return FALSE;
1730: 	}
1731:
1732: 	ike_sa_id = ike_sa->get_id(ike_sa);
1733: 	if (!ike_sa_id->is_initiator(ike_sa_id))
1734: 	{
1735: 		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA as responder");
1736: 		return FALSE;
1737: 	}
1738:
/* the bus tracks the SA checked out by this thread; require ownership */
1739: 	if (ike_sa != charon->bus->get_sa(charon->bus))
1740: 	{
1741: 		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA not checked "
1742: 			 "out by current thread");
1743: 		return FALSE;
1744: 	}
1745:
1746: 	new_spi = get_spi(this);
1747: 	if (!new_spi)
1748: 	{
1749: 		DBG1(DBG_MGR, "unable to allocate new initiator SPI for IKE_SA");
1750: 		return FALSE;
1751: 	}
1752:
1753: 	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1754: 	{
1755: 		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1756: 		{	/* it looks like flush() has been called and the SA is being deleted
1757: 			 * anyway, no need for a new SPI */
1758: 			DBG2(DBG_MGR, "ignored change of initiator SPI during shutdown");
1759: 			unlock_single_segment(this, segment);
1760: 			return FALSE;
1761: 		}
1762: 	}
1763: 	else
1764: 	{
1765: 		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA, not found");
1766: 		return FALSE;
1767: 	}
1768:
1769: 	/* the hashtable row and segment are determined by the local SPI as
1770: 	 * initiator, so if we change it the row and segment derived from it might
1771: 	 * change as well. This could be a problem for threads waiting for the
1772: 	 * entry (in particular those enumerating entries to check them out by
1773: 	 * unique ID or name). In order to avoid having to drive them out and thus
1774: 	 * preventing them from checking out the entry (even though the ID or name
1775: 	 * will not change and enumerating it is also fine), we mask the new SPI and
1776: 	 * merge it with the old SPI so the entry ends up in the same row/segment.
1777: 	 * Since SPIs are 64-bit and the number of rows/segments is usually
1778: 	 * relatively low this should not be a problem. */
1779: 	spi = ike_sa_id->get_initiator_spi(ike_sa_id);
1780: 	new_spi = (spi & (uint64_t)this->table_mask) |
1781: 			  (new_spi & ~(uint64_t)this->table_mask);
1782:
1783: 	DBG2(DBG_MGR, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64" to "
1784: 		 "%.16"PRIx64, ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa),
1785: 		 be64toh(spi), be64toh(new_spi));
1786:
1787: 	ike_sa_id->set_initiator_spi(ike_sa_id, new_spi);
1788: 	entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa_id);
1789:
1790: 	entry->condvar->signal(entry->condvar);
1791: 	unlock_single_segment(this, segment);
1792: 	return TRUE;
1793: }
1794:
/* Filter for create_enumerator(wait=TRUE): blocks via wait_for_entry()
 * until each entry becomes available, publishes it on the bus and yields
 * its IKE_SA.  Entries whose waiters are driven out are skipped. */
1795: CALLBACK(enumerator_filter_wait, bool,
1796: 	private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1797: {
1798: 	entry_t *entry;
1799: 	u_int segment;
1800: 	ike_sa_t **out;
1801:
1802: 	VA_ARGS_VGET(args, out);
1803:
1804: 	while (orig->enumerate(orig, &entry, &segment))
1805: 	{
1806: 		if (wait_for_entry(this, entry, segment))
1807: 		{
1808: 			*out = entry->ike_sa;
1809: 			charon->bus->set_sa(charon->bus, *out);
1810: 			return TRUE;
1811: 		}
1812: 	}
1813: 	return FALSE;
1814: }
1815:
/* Filter for create_enumerator(wait=FALSE): yields only entries that are
 * neither checked out nor marked for drive-out, without ever blocking. */
1816: CALLBACK(enumerator_filter_skip, bool,
1817: 	private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1818: {
1819: 	entry_t *entry;
1820: 	u_int segment;
1821: 	ike_sa_t **out;
1822:
1823: 	VA_ARGS_VGET(args, out);
1824:
1825: 	while (orig->enumerate(orig, &entry, &segment))
1826: 	{
1827: 		if (!entry->driveout_new_threads &&
1828: 			!entry->driveout_waiting_threads &&
1829: 			!entry->checked_out)
1830: 		{
1831: 			*out = entry->ike_sa;
1832: 			charon->bus->set_sa(charon->bus, *out);
1833: 			return TRUE;
1834: 		}
1835: 	}
1836: 	return FALSE;
1837: }
1838:
/* Enumerator cleanup callback: clears the thread's current SA on the bus
 * once enumeration ends (the filters above set it per yielded SA). */
1839: CALLBACK(reset_sa, void,
1840: 	void *data)
1841: {
1842: 	charon->bus->set_sa(charon->bus, NULL);
1843: }
1844:
/* Enumerate all managed IKE_SAs.  With wait=TRUE the enumerator blocks
 * on busy entries (enumerator_filter_wait); with wait=FALSE it silently
 * skips checked-out or doomed entries (enumerator_filter_skip). */
1845: METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1846: 	private_ike_sa_manager_t* this, bool wait)
1847: {
1848: 	return enumerator_create_filter(create_table_enumerator(this),
1849: 		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1850: 		this, reset_sa);
1851: }
1852:
/* Return a checked-out IKE_SA to the manager.  Updates the stored
 * ike_sa_id (the responder SPI may have been learned meanwhile), keeps
 * the half-open accounting in sync with the SA's state and remote host,
 * registers the peer identities for the uniqueness check once the SA is
 * established, and wakes threads waiting on the entry.  An SA without an
 * entry (e.g. created via create_new() but never registered) gets a new
 * entry via create_and_put_entry(). */
1853: METHOD(ike_sa_manager_t, checkin, void,
1854: 	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1855: {
1856: 	/* to check the SA back in, we look for the pointer of the ike_sa
1857: 	 * in all entries.
1858: 	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1859: 	 * on reception of a IKE_SA_INIT response) the lookup will work but
1860: 	 * updating of the SPI MAY be necessary...
1861: 	 */
1862: 	entry_t *entry;
1863: 	ike_sa_id_t *ike_sa_id;
1864: 	host_t *other;
1865: 	identification_t *my_id, *other_id;
1866: 	u_int segment;
1867:
1868: 	ike_sa_id = ike_sa->get_id(ike_sa);
1869: 	my_id = ike_sa->get_my_id(ike_sa);
1870: 	other_id = ike_sa->get_other_eap_id(ike_sa);
1871: 	other = ike_sa->get_other_host(ike_sa);
1872:
1873: 	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1874: 		 ike_sa->get_unique_id(ike_sa));
1875:
1876: 	/* look for the entry */
1877: 	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1878: 	{
1879: 		/* ike_sa_id must be updated */
1880: 		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1881: 		/* signal waiting threads */
1882: 		entry->checked_out = NULL;
1883: 		entry->processing = -1;
1884: 		/* check if this SA is half-open */
1885: 		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1886: 		{
1887: 			/* not half open anymore */
1888: 			entry->half_open = FALSE;
1889: 			remove_half_open(this, entry);
1890: 		}
1891: 		else if (entry->half_open && !other->ip_equals(other, entry->other))
1892: 		{
1893: 			/* the other host's IP has changed, we must update the hash table */
1894: 			remove_half_open(this, entry);
1895: 			DESTROY_IF(entry->other);
1896: 			entry->other = other->clone(other);
1897: 			put_half_open(this, entry);
1898: 		}
1899: 		else if (!entry->half_open &&
1900: 				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1901: 		{
1902: 			/* this is a new half-open SA */
1903: 			entry->half_open = TRUE;
1904: 			entry->other = other->clone(other);
1905: 			put_half_open(this, entry);
1906: 		}
1907: 		entry->condvar->signal(entry->condvar);
1908: 	}
1909: 	else
1910: 	{
/* no entry yet: register the SA now (segment stays locked until below) */
1.1.1.2 ! misho 1911: 		segment = create_and_put_entry(this, ike_sa, &entry);
1.1 misho 1912: 	}
1913: 	DBG2(DBG_MGR, "checkin of IKE_SA successful");
1914:
1915: 	/* apply identities for duplicate test */
1916: 	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1917: 		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1918: 		entry->my_id == NULL && entry->other_id == NULL)
1919: 	{
1920: 		if (ike_sa->get_version(ike_sa) == IKEV1)
1921: 		{
1922: 			/* If authenticated and received INITIAL_CONTACT,
1923: 			 * delete any existing IKE_SAs with that peer. */
1924: 			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1925: 			{
1926: 				/* We can't hold the segment locked while checking the
1927: 				 * uniqueness as this could lead to deadlocks. We mark the
1928: 				 * entry as checked out while we release the lock so no other
1929: 				 * thread can acquire it. Since it is not yet in the list of
1930: 				 * connected peers that will not cause a deadlock as no other
1931: 				 * caller of check_uniqueness() will try to check out this SA */
1932: 				entry->checked_out = thread_current();
1933: 				unlock_single_segment(this, segment);
1934:
1935: 				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1936: 				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1937:
1938: 				/* The entry could have been modified in the mean time, e.g.
1939: 				 * because another SA was added/removed next to it or another
1940: 				 * thread is waiting, but it should still exist, so there is no
1941: 				 * need for a lookup via get_entry_by... */
1942: 				lock_single_segment(this, segment);
1943: 				entry->checked_out = NULL;
1944: 				/* We already signaled waiting threads above, we have to do that
1945: 				 * again after checking the SA out and back in again. */
1946: 				entry->condvar->signal(entry->condvar);
1947: 			}
1948: 		}
1949:
1950: 		entry->my_id = my_id->clone(my_id);
1951: 		entry->other_id = other_id->clone(other_id);
1952: 		if (!entry->other)
1953: 		{
1954: 			entry->other = other->clone(other);
1955: 		}
1956: 		put_connected_peers(this, entry);
1957: 	}
1958:
1959: 	unlock_single_segment(this, segment);
1960:
1961: 	charon->bus->set_sa(charon->bus, NULL);
1962: }
1963:
/* Check an IKE_SA back in and destroy it.  Sets both drive-out flags so
 * no new thread can grab the entry, broadcasts until all waiters have
 * left, then removes the entry from the main table and from the
 * half-open, connected-peers and init-hash side tables before freeing
 * it.  During shutdown (flush() already running) the SA is merely
 * checked in and left for flush() to destroy. */
1964: METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1965: 	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1966: {
1967: 	/* deletion is a bit complex, we must ensure that no thread is waiting for
1968: 	 * this SA.
1969: 	 * We take this SA from the table, and start signaling while threads
1970: 	 * are in the condvar.
1971: 	 */
1972: 	entry_t *entry;
1973: 	ike_sa_id_t *ike_sa_id;
1974: 	u_int segment;
1975:
1976: 	ike_sa_id = ike_sa->get_id(ike_sa);
1977:
1978: 	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1979: 		 ike_sa->get_unique_id(ike_sa));
1980:
1981: 	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1982: 	{
1983: 		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1984: 		{	/* it looks like flush() has been called and the SA is being deleted
1985: 			 * anyway, just check it in */
1986: 			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
1987: 			entry->checked_out = NULL;
1988: 			entry->condvar->broadcast(entry->condvar);
1989: 			unlock_single_segment(this, segment);
1990: 			return;
1991: 		}
1992:
1993: 		/* drive out waiting threads, as we are in hurry */
1994: 		entry->driveout_waiting_threads = TRUE;
1995: 		/* mark it, so no new threads can get this entry */
1996: 		entry->driveout_new_threads = TRUE;
1997: 		/* wait until all workers have done their work */
1998: 		while (entry->waiting_threads)
1999: 		{
2000: 			/* wake up all */
2001: 			entry->condvar->broadcast(entry->condvar);
2002: 			/* they will wake us again when their work is done */
2003: 			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
2004: 		}
2005: 		remove_entry(this, entry);
2006: 		unlock_single_segment(this, segment);
2007:
/* entry is now unreachable; the side tables use their own locks */
2008: 		if (entry->half_open)
2009: 		{
2010: 			remove_half_open(this, entry);
2011: 		}
2012: 		if (entry->my_id && entry->other_id)
2013: 		{
2014: 			remove_connected_peers(this, entry);
2015: 		}
2016: 		if (entry->init_hash.ptr)
2017: 		{
2018: 			remove_init_hash(this, entry->init_hash);
2019: 		}
2020:
2021: 		entry_destroy(entry);
2022:
2023: 		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
2024: 	}
2025: 	else
2026: 	{
2027: 		DBG1(DBG_MGR, "tried to checkin and delete nonexistent IKE_SA");
2028: 		ike_sa->destroy(ike_sa);
2029: 	}
2030: 	charon->bus->set_sa(charon->bus, NULL);
2031: }
2032:
2033: /**
2034:  * Cleanup function for create_id_enumerator
/* Destroys the cloned list of ike_sa_id_t handed out by
 * create_id_enumerator(), destroying each contained ID. */
2035:  */
2036: static void id_enumerator_cleanup(linked_list_t *ids)
2037: {
2038: 	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
2039: }
2040:
/* Enumerate the IKE_SA IDs of all SAs connected between the given local
 * and remote identities (optionally restricted to an address family).
 * The matching connected_peers entry's ID list is cloned under the
 * segment read-lock, so the returned enumerator is safe to use without
 * holding any manager lock; IDs are freed by id_enumerator_cleanup(). */
2041: METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
2042: 	private_ike_sa_manager_t *this, identification_t *me,
2043: 	identification_t *other, int family)
2044: {
2045: 	table_item_t *item;
2046: 	u_int row, segment;
2047: 	rwlock_t *lock;
2048: 	linked_list_t *ids = NULL;
2049:
2050: 	row = chunk_hash_inc(other->get_encoding(other),
2051: 						 chunk_hash(me->get_encoding(me))) & this->table_mask;
2052: 	segment = row & this->segment_mask;
2053:
2054: 	lock = this->connected_peers_segments[segment].lock;
2055: 	lock->read_lock(lock);
2056: 	item = this->connected_peers_table[row];
2057: 	while (item)
2058: 	{
2059: 		connected_peers_t *current = item->value;
2060:
2061: 		if (connected_peers_match(current, me, other, family))
2062: 		{
2063: 			ids = current->sas->clone_offset(current->sas,
2064: 											 offsetof(ike_sa_id_t, clone));
2065: 			break;
2066: 		}
2067: 		item = item->next;
2068: 	}
2069: 	lock->unlock(lock);
2070:
2071: 	if (!ids)
2072: 	{
2073: 		return enumerator_create_empty();
2074: 	}
2075: 	return enumerator_create_cleaner(ids->create_enumerator(ids),
2076: 									 (void*)id_enumerator_cleanup, ids);
2077: }
2078:
2079: /**
2080:  * Move all CHILD_SAs and virtual IPs from old to new
/* Used when a reauthentication replaces an IKE_SA: CHILD_SAs, child
 * tasks and (non-local) virtual IPs migrate from the old SA to the new
 * one, emitting children_migrate and assign_vips bus events so listeners
 * can follow the move.  The thread's bus SA is left pointing at `old`. */
2081:  */
2082: static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
2083: {
2084: 	enumerator_t *enumerator;
2085: 	child_sa_t *child_sa;
2086: 	host_t *vip;
2087: 	int chcount = 0, vipcount = 0;
2088:
2089: 	charon->bus->children_migrate(charon->bus, new->get_id(new),
2090: 								  new->get_unique_id(new));
2091: 	enumerator = old->create_child_sa_enumerator(old);
2092: 	while (enumerator->enumerate(enumerator, &child_sa))
2093: 	{
2094: 		old->remove_child_sa(old, enumerator);
2095: 		new->add_child_sa(new, child_sa);
2096: 		chcount++;
2097: 	}
2098: 	enumerator->destroy(enumerator);
2099:
2100: 	new->adopt_child_tasks(new, old);
2101:
2102: 	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
2103: 	while (enumerator->enumerate(enumerator, &vip))
2104: 	{
2105: 		new->add_virtual_ip(new, FALSE, vip);
2106: 		vipcount++;
2107: 	}
2108: 	enumerator->destroy(enumerator);
2109: 	/* this does not release the addresses, which is good, but it does trigger
2110: 	 * an assign_vips(FALSE) event... */
2111: 	old->clear_virtual_ips(old, FALSE);
2112: 	/* ...trigger the analogous event on the new SA */
2113: 	charon->bus->set_sa(charon->bus, new);
2114: 	charon->bus->assign_vips(charon->bus, new, TRUE);
2115: 	charon->bus->children_migrate(charon->bus, NULL, 0);
2116: 	charon->bus->set_sa(charon->bus, old);
2117:
2118: 	if (chcount || vipcount)
2119: 	{
2120: 		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
2121: 			 "children and %d virtual IPs", chcount, vipcount);
2122: 	}
2123: }
2124:
2125: /**
2126:  * Delete an existing IKE_SA due to a unique replace policy
/* If the duplicate comes from the same remote host it is treated as a
 * suspected reauthentication: for IKEv1 the children/VIPs are adopted
 * implicitly, and the old SA's deletion is deferred by 10 seconds (for
 * both versions) so the new SA can complete first.  Otherwise the
 * duplicate is deleted immediately; the return value propagates
 * duplicate->delete()'s status (may be DESTROY_ME). */
2127:  */
2128: static status_t enforce_replace(private_ike_sa_manager_t *this,
2129: 								ike_sa_t *duplicate, ike_sa_t *new,
2130: 								identification_t *other, host_t *host)
2131: {
2132: 	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
2133:
2134: 	if (host->equals(host, duplicate->get_other_host(duplicate)))
2135: 	{
2136: 		/* looks like a reauthentication attempt */
2137: 		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
2138: 			new->get_version(new) == IKEV1)
2139: 		{
2140: 			/* IKEv1 implicitly takes over children, IKEv2 recreates them
2141: 			 * explicitly. */
2142: 			adopt_children_and_vips(duplicate, new);
2143: 		}
2144: 		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
2145: 		 * peers need to complete the new SA first, otherwise the quick modes
2146: 		 * might get lost. For IKEv2 we do the same, as we want overlapping
2147: 		 * CHILD_SAs to keep connectivity up. */
2148: 		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
2149: 			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
2150: 		DBG1(DBG_IKE, "schedule delete of duplicate IKE_SA for peer '%Y' due "
2151: 			 "to uniqueness policy and suspected reauthentication", other);
2152: 		return SUCCESS;
2153: 	}
2154: 	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
2155: 		 "uniqueness policy", other);
2156: 	return duplicate->delete(duplicate, FALSE);
2157: }
2158:
/* Enforce the configured uniqueness policy for a freshly authenticated
 * IKE_SA: enumerate established SAs between the same identity pair and
 * replace/keep/destroy duplicates accordingly.  force_replace (IKEv1
 * INITIAL_CONTACT) destroys duplicates unconditionally.  Returns TRUE if
 * the NEW SA should be cancelled (UNIQUE_KEEP found an SA from the same
 * host pair to keep instead).  Resets the thread's bus SA to `ike_sa`
 * before returning, as the checkout/checkin of duplicates clobbers it. */
2159: METHOD(ike_sa_manager_t, check_uniqueness, bool,
2160: 	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
2161: {
2162: 	bool cancel = FALSE;
2163: 	peer_cfg_t *peer_cfg;
2164: 	unique_policy_t policy;
2165: 	enumerator_t *enumerator;
2166: 	ike_sa_id_t *id = NULL;
2167: 	identification_t *me, *other;
2168: 	host_t *other_host;
2169:
2170: 	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
2171: 	policy = peer_cfg->get_unique_policy(peer_cfg);
2172: 	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
2173: 	{
2174: 		return FALSE;
2175: 	}
2176: 	me = ike_sa->get_my_id(ike_sa);
2177: 	other = ike_sa->get_other_eap_id(ike_sa);
2178: 	other_host = ike_sa->get_other_host(ike_sa);
2179:
2180: 	enumerator = create_id_enumerator(this, me, other,
2181: 									  other_host->get_family(other_host));
2182: 	while (enumerator->enumerate(enumerator, &id))
2183: 	{
2184: 		status_t status = SUCCESS;
2185: 		ike_sa_t *duplicate;
2186:
2187: 		duplicate = checkout(this, id);
2188: 		if (!duplicate)
2189: 		{
2190: 			continue;
2191: 		}
2192: 		if (force_replace)
2193: 		{
2194: 			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
2195: 				 "received INITIAL_CONTACT", other);
2196: 			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
2197: 			checkin_and_destroy(this, duplicate);
2198: 			continue;
2199: 		}
/* only treat SAs using the same peer config as duplicates */
2200: 		peer_cfg = duplicate->get_peer_cfg(duplicate);
2201: 		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
2202: 		{
2203: 			switch (duplicate->get_state(duplicate))
2204: 			{
2205: 				case IKE_ESTABLISHED:
2206: 				case IKE_REKEYING:
2207: 					switch (policy)
2208: 					{
2209: 						case UNIQUE_REPLACE:
2210: 							status = enforce_replace(this, duplicate, ike_sa,
2211: 													 other, other_host);
2212: 							break;
2213: 						case UNIQUE_KEEP:
2214: 							/* potential reauthentication? */
2215: 							if (!other_host->equals(other_host,
2216: 											duplicate->get_other_host(duplicate)))
2217: 							{
2218: 								cancel = TRUE;
2219: 								/* we keep the first IKE_SA and delete all
2220: 								 * other duplicates that might exist */
2221: 								policy = UNIQUE_REPLACE;
2222: 							}
2223: 							break;
2224: 						default:
2225: 							break;
2226: 					}
2227: 					break;
2228: 				default:
2229: 					break;
2230: 			}
2231: 		}
2232: 		if (status == DESTROY_ME)
2233: 		{
2234: 			checkin_and_destroy(this, duplicate);
2235: 		}
2236: 		else
2237: 		{
2238: 			checkin(this, duplicate);
2239: 		}
2240: 	}
2241: 	enumerator->destroy(enumerator);
2242: 	/* reset thread's current IKE_SA after checkin */
2243: 	charon->bus->set_sa(charon->bus, ike_sa);
2244: 	return cancel;
2245: }
2246:
2247: METHOD(ike_sa_manager_t, has_contact, bool,
2248: private_ike_sa_manager_t *this, identification_t *me,
2249: identification_t *other, int family)
2250: {
2251: table_item_t *item;
2252: u_int row, segment;
2253: rwlock_t *lock;
2254: bool found = FALSE;
2255:
2256: row = chunk_hash_inc(other->get_encoding(other),
2257: chunk_hash(me->get_encoding(me))) & this->table_mask;
2258: segment = row & this->segment_mask;
2259: lock = this->connected_peers_segments[segment].lock;
2260: lock->read_lock(lock);
2261: item = this->connected_peers_table[row];
2262: while (item)
2263: {
2264: if (connected_peers_match(item->value, me, other, family))
2265: {
2266: found = TRUE;
2267: break;
2268: }
2269: item = item->next;
2270: }
2271: lock->unlock(lock);
2272:
2273: return found;
2274: }
2275:
2276: METHOD(ike_sa_manager_t, get_count, u_int,
2277: private_ike_sa_manager_t *this)
2278: {
2279: return (u_int)ref_cur(&this->total_sa_count);
2280: }
2281:
2282: METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2283: private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2284: {
2285: table_item_t *item;
2286: u_int row, segment;
2287: rwlock_t *lock;
2288: chunk_t addr;
2289: u_int count = 0;
2290:
2291: if (ip)
2292: {
2293: addr = ip->get_address(ip);
2294: row = chunk_hash(addr) & this->table_mask;
2295: segment = row & this->segment_mask;
2296: lock = this->half_open_segments[segment].lock;
2297: lock->read_lock(lock);
2298: item = this->half_open_table[row];
2299: while (item)
2300: {
2301: half_open_t *half_open = item->value;
2302:
2303: if (chunk_equals(addr, half_open->other))
2304: {
2305: count = responder_only ? half_open->count_responder
2306: : half_open->count;
2307: break;
2308: }
2309: item = item->next;
2310: }
2311: lock->unlock(lock);
2312: }
2313: else
2314: {
2315: count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2316: : (u_int)ref_cur(&this->half_open_count);
2317: }
2318: return count;
2319: }
2320:
2321: METHOD(ike_sa_manager_t, set_spi_cb, void,
2322: private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
2323: {
2324: this->spi_lock->write_lock(this->spi_lock);
2325: this->spi_cb.cb = callback;
2326: this->spi_cb.data = data;
2327: this->spi_lock->unlock(this->spi_lock);
2328: }
2329:
2330: /**
2331: * Destroy all entries
2332: */
2333: static void destroy_all_entries(private_ike_sa_manager_t *this)
2334: {
2335: enumerator_t *enumerator;
2336: entry_t *entry;
2337: u_int segment;
2338:
2339: enumerator = create_table_enumerator(this);
2340: while (enumerator->enumerate(enumerator, &entry, &segment))
2341: {
2342: charon->bus->set_sa(charon->bus, entry->ike_sa);
2343: if (entry->half_open)
2344: {
2345: remove_half_open(this, entry);
2346: }
2347: if (entry->my_id && entry->other_id)
2348: {
2349: remove_connected_peers(this, entry);
2350: }
2351: if (entry->init_hash.ptr)
2352: {
2353: remove_init_hash(this, entry->init_hash);
2354: }
2355: remove_entry_at((private_enumerator_t*)enumerator);
2356: entry_destroy(entry);
2357: }
2358: enumerator->destroy(enumerator);
2359: charon->bus->set_sa(charon->bus, NULL);
2360: }
2361:
/**
 * Shut down all managed IKE_SAs.
 *
 * Runs in four phases while holding all segment mutexes:
 *   1. flag every entry so no new threads are accepted and waiting threads
 *      are driven out,
 *   2. block on each entry's condvar until no thread waits on or has
 *      checked out the entry,
 *   3. initiate deletion of each IKE_SA,
 *   4. destroy all entries and their lookup-table bookkeeping.
 * Finally the RNG and SPI callback are cleared so no new SPIs can be
 * allocated afterwards.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone; releases the segment mutex
			 * while waiting so other threads can check the SA back in */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		entry->ike_sa->delete(entry->ike_sa, TRUE);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI allocation: drop the RNG and any registered callback */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2417:
2418: METHOD(ike_sa_manager_t, destroy, void,
2419: private_ike_sa_manager_t *this)
2420: {
2421: u_int i;
2422:
2423: /* in case new SAs were checked in after flush() was called */
2424: lock_all_segments(this);
2425: destroy_all_entries(this);
2426: unlock_all_segments(this);
2427:
2428: free(this->ike_sa_table);
2429: free(this->half_open_table);
2430: free(this->connected_peers_table);
2431: free(this->init_hashes_table);
2432: for (i = 0; i < this->segment_count; i++)
2433: {
2434: this->segments[i].mutex->destroy(this->segments[i].mutex);
2435: this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2436: this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2437: this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2438: }
2439: free(this->segments);
2440: free(this->half_open_segments);
2441: free(this->connected_peers_segments);
2442: free(this->init_hashes_segments);
2443:
1.1.1.2 ! misho 2444: array_destroy(this->config_checkouts);
! 2445: this->config_mutex->destroy(this->config_mutex);
! 2446: this->config_condvar->destroy(this->config_condvar);
! 2447:
1.1 misho 2448: this->spi_lock->destroy(this->spi_lock);
2449: free(this);
2450: }
2451:
2452: /**
2453: * This function returns the next-highest power of two for the given number.
2454: * The algorithm works by setting all bits on the right-hand side of the most
2455: * significant 1 to 1 and then increments the whole number so it rolls over
2456: * to the nearest power of two. Note: returns 0 for n == 0
2457: */
2458: static u_int get_nearest_powerof2(u_int n)
2459: {
2460: u_int i;
2461:
2462: --n;
2463: for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2464: {
2465: n |= n >> i;
2466: }
2467: return ++n;
2468: }
2469:
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	char *spi_val;
	u_int i;

	INIT(this,
		.public = {
			.create_new = _create_new,
			.checkout_new = _checkout_new,
			.checkout = _checkout,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.new_initiator_spi = _new_initiator_spi,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* weak RNG is sufficient, SPIs only have to be unique, not unpredictable */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	/* optional mask/label applied to allocated SPIs (e.g. for load-balancing
	 * setups where SPIs identify the responsible node) */
	spi_val = lib->settings->get_str(lib->settings, "%s.spi_mask", NULL,
									 lib->ns);
	this->spi_mask = settings_value_as_uint64(spi_val, 0);
	spi_val = lib->settings->get_str(lib->settings, "%s.spi_label", NULL,
									 lib->ns);
	this->spi_label = settings_value_as_uint64(spi_val, 0);
	if (this->spi_mask || this->spi_label)
	{
		DBG1(DBG_IKE, "using SPI label 0x%.16"PRIx64" and mask 0x%.16"PRIx64,
			 this->spi_label, this->spi_mask);
		/* the allocated SPI is assumed to be in network order */
		this->spi_mask = htobe64(this->spi_mask);
		this->spi_label = htobe64(this->spi_label);
	}

	/* 0 means no limit on the number of IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size is rounded up to a power of two and clamped so the mask
	 * trick (hash & mask) works for bucket selection */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_size",
												DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	/* segment count is likewise a power of two, at most one per bucket */
	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_segments",
												DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* synchronization for checkout_by_config() */
	this->config_mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	this->config_condvar = condvar_create(CONDVAR_TYPE_DEFAULT);

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>