/* $OpenBSD: rde_dual.c,v 1.28 2016/09/02 16:44:33 renato Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>

#include <stdlib.h>
#include <string.h>

#include "eigrpd.h"
#include "eigrpe.h"
#include "rde.h"
#include "log.h"

static int		 dual_fsm(struct rt_node *, enum dual_event);
static __inline int	 rt_compare(struct rt_node *, struct rt_node *);
static struct rt_node	*rt_find(struct eigrp *, struct rinfo *);
static struct rt_node	*rt_new(struct eigrp *, struct rinfo *);
static struct eigrp_route *route_find(struct rde_nbr *, struct rt_node *);
static struct eigrp_route *route_new(struct rt_node *, struct rde_nbr *,
			    struct rinfo *);
static void		 route_del(struct rt_node *, struct eigrp_route *);
static uint32_t		 safe_sum_uint32(uint32_t, uint32_t);
static uint32_t		 safe_mul_uint32(uint32_t, uint32_t);
static uint32_t		 route_composite_metric(uint8_t *, uint32_t, uint32_t,
			    uint8_t, uint8_t);
static void		 route_update_metrics(struct eigrp *,
			    struct eigrp_route *, struct rinfo *);
static void		 reply_outstanding_add(struct rt_node *,
			    struct rde_nbr *);
static struct reply_node *reply_outstanding_find(struct rt_node *,
			    struct rde_nbr *);
static void		 reply_outstanding_remove(struct reply_node *);
static void		 reply_active_timer(int, short, void *);
static void		 reply_active_start_timer(struct reply_node *);
static void		 reply_active_stop_timer(struct reply_node *);
static void		 reply_sia_timer(int, short, void *);
static void		 reply_sia_start_timer(struct reply_node *);
static void		 reply_sia_stop_timer(struct reply_node *);
static void		 rinfo_fill_infinite(struct rt_node *, enum route_type,
			    struct rinfo *);
static void		 rt_update_fib(struct rt_node *);
static void		 rt_set_successor(struct rt_node *,
			    struct eigrp_route *);
static struct eigrp_route *rt_get_successor_fc(struct rt_node *);
static void		 rde_send_update(struct eigrp_iface *, struct rinfo *);
static void		 rde_send_update_all(struct rt_node *, struct rinfo *);
static void		 rde_send_query(struct eigrp_iface *, struct rinfo *,
			    int);
static void		 rde_send_siaquery(struct rde_nbr *, struct rinfo *);
static void		 rde_send_query_all(struct eigrp *, struct rt_node *,
			    int);
static void		 rde_send_reply(struct rde_nbr *, struct rinfo *, int);
static void		 rde_last_reply(struct rt_node *);
static __inline int	 rde_nbr_compare(struct rde_nbr *, struct rde_nbr *);

RB_GENERATE(rt_tree, rt_node, entry, rt_compare)
RB_GENERATE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)

struct rde_nbr_head rde_nbrs = RB_INITIALIZER(&rde_nbrs);

/*
 * NOTE: events that don't cause a state transition aren't triggered to avoid
 * too much verbosity and are here mostly for illustration purposes.
 */
static struct {
	int		state;
	enum dual_event	event;
	int		new_state;
} dual_fsm_tbl[] = {
    /* current state		event		resulting state */
    /* Passive */
    {DUAL_STA_PASSIVE,		DUAL_EVT_1,	0},
    {DUAL_STA_PASSIVE,		DUAL_EVT_2,	0},
    {DUAL_STA_PASSIVE,		DUAL_EVT_3,	DUAL_STA_ACTIVE3},
    {DUAL_STA_PASSIVE,		DUAL_EVT_4,	DUAL_STA_ACTIVE1},
    /* Active Oij=0 */
    {DUAL_STA_ACTIVE0,		DUAL_EVT_5,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE0,		DUAL_EVT_11,	DUAL_STA_ACTIVE1},
    {DUAL_STA_ACTIVE0,		DUAL_EVT_14,	DUAL_STA_PASSIVE},
    /* Active Oij=1 */
    {DUAL_STA_ACTIVE1,		DUAL_EVT_5,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE1,		DUAL_EVT_9,	DUAL_STA_ACTIVE0},
    {DUAL_STA_ACTIVE1,		DUAL_EVT_15,	DUAL_STA_PASSIVE},
    /* Active Oij=2 */
    {DUAL_STA_ACTIVE2,		DUAL_EVT_12,	DUAL_STA_ACTIVE3},
    {DUAL_STA_ACTIVE2,		DUAL_EVT_16,	DUAL_STA_PASSIVE},
    /* Active Oij=3 */
    {DUAL_STA_ACTIVE3,		DUAL_EVT_10,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE3,		DUAL_EVT_13,	DUAL_STA_PASSIVE},
    /* Active (all) */
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_6,	0},
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_7,	0},
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_8,	0},
    /* sentinel */
    {-1,			0,		0},
};

static const char * const dual_event_names[] = {
	"DUAL_EVT_1",
	"DUAL_EVT_2",
	"DUAL_EVT_3",
	"DUAL_EVT_4",
	"DUAL_EVT_5",
	"DUAL_EVT_6",
	"DUAL_EVT_7",
	"DUAL_EVT_8",
	"DUAL_EVT_9",
	"DUAL_EVT_10",
	"DUAL_EVT_11",
	"DUAL_EVT_12",
	"DUAL_EVT_13",
	"DUAL_EVT_14",
	"DUAL_EVT_15",
	"DUAL_EVT_16"
};
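
/*
 * The table above is searched with a bitwise AND on the state field, so the
 * DUAL_STA_ACTIVE_ALL entries match a route that is in any of the four
 * active states.  A new_state of zero means "stay in the current state";
 * dual_fsm() only rewrites rn->state for non-zero entries.
 */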

static int
dual_fsm(struct rt_node *rn, enum dual_event event)
{
	int		old_state;
	int		new_state = 0;
	int		i;

	old_state = rn->state;
	for (i = 0; dual_fsm_tbl[i].state != -1; i++)
		if ((dual_fsm_tbl[i].state & old_state) &&
		    (dual_fsm_tbl[i].event == event)) {
			new_state = dual_fsm_tbl[i].new_state;
			break;
		}

	if (dual_fsm_tbl[i].state == -1) {
		/* event outside of the defined fsm, ignore it. */
		log_warnx("%s: route %s, event %s not expected in state %s",
		    __func__, log_prefix(rn), dual_event_names[event],
		    dual_state_name(old_state));
		return (0);
	}

	if (new_state != 0)
		rn->state = new_state;

	if (old_state != rn->state) {
		log_debug("%s: event %s changing state for prefix %s "
		    "from %s to %s", __func__, dual_event_names[event],
		    log_prefix(rn), dual_state_name(old_state),
		    dual_state_name(rn->state));

		if (old_state == DUAL_STA_PASSIVE ||
		    new_state == DUAL_STA_PASSIVE)
			rt_update_fib(rn);
	}

	return (0);
}

static __inline int
rt_compare(struct rt_node *a, struct rt_node *b)
{
	int		 addrcmp;

	addrcmp = eigrp_addrcmp(a->eigrp->af, &a->prefix, &b->prefix);
	if (addrcmp != 0)
		return (addrcmp);

	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);

	return (0);
}

static struct rt_node *
rt_find(struct eigrp *eigrp, struct rinfo *ri)
{
	struct rt_node	 rn;

	rn.eigrp = eigrp;
	rn.prefix = ri->prefix;
	rn.prefixlen = ri->prefixlen;

	return (RB_FIND(rt_tree, &eigrp->topology, &rn));
}

static struct rt_node *
rt_new(struct eigrp *eigrp, struct rinfo *ri)
{
	struct rt_node	*rn;

	if ((rn = calloc(1, sizeof(*rn))) == NULL)
		fatal("rt_new");

	rn->eigrp = eigrp;
	rn->prefix = ri->prefix;
	rn->prefixlen = ri->prefixlen;
	rn->state = DUAL_STA_PASSIVE;
	TAILQ_INIT(&rn->routes);
	TAILQ_INIT(&rn->rijk);
	rt_set_successor(rn, NULL);

	if (RB_INSERT(rt_tree, &eigrp->topology, rn) != NULL) {
		log_warnx("%s failed for %s", __func__, log_prefix(rn));
		free(rn);
		return (NULL);
	}

	log_debug("%s: prefix %s", __func__, log_prefix(rn));

	return (rn);
}

void
rt_del(struct rt_node *rn)
{
	struct eigrp_route	*route;
	struct reply_node	*reply;

	log_debug("%s: prefix %s", __func__, log_prefix(rn));

	while ((reply = TAILQ_FIRST(&rn->rijk)) != NULL)
		reply_outstanding_remove(reply);
	while ((route = TAILQ_FIRST(&rn->routes)) != NULL)
		route_del(rn, route);
	RB_REMOVE(rt_tree, &rn->eigrp->topology, rn);
	free(rn);
}

static struct eigrp_route *
route_find(struct rde_nbr *nbr, struct rt_node *rn)
{
	struct eigrp_route	*route;

	TAILQ_FOREACH(route, &rn->routes, entry)
		if (route->nbr == nbr)
			return (route);

	return (NULL);
}

static struct eigrp_route *
route_new(struct rt_node *rn, struct rde_nbr *nbr, struct rinfo *ri)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_route	*route, *tmp;

	if ((route = calloc(1, sizeof(*route))) == NULL)
		fatal("route_new");

	route->nbr = nbr;
	route->type = ri->type;
	if (eigrp_addrisset(eigrp->af, &ri->nexthop))
		route->nexthop = ri->nexthop;
	else
		route->nexthop = nbr->addr;
	route_update_metrics(eigrp, route, ri);

	/* order by nexthop */
	TAILQ_FOREACH(tmp, &rn->routes, entry)
		if (eigrp_addrcmp(eigrp->af, &tmp->nexthop,
		    &route->nexthop) > 0)
			break;
	if (tmp)
		TAILQ_INSERT_BEFORE(tmp, route, entry);
	else
		TAILQ_INSERT_TAIL(&rn->routes, route, entry);

	log_debug("%s: prefix %s via %s distance (%u/%u)", __func__,
	    log_prefix(rn), log_route_origin(eigrp->af, route->nbr),
	    route->distance, route->rdistance);

	return (route);
}

static void
route_del(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp		*eigrp = rn->eigrp;

	log_debug("%s: prefix %s via %s", __func__, log_prefix(rn),
	    log_route_origin(eigrp->af, route->nbr));

	if (route->flags & F_EIGRP_ROUTE_INSTALLED)
		rde_send_delete_kroute(rn, route);

	TAILQ_REMOVE(&rn->routes, route, entry);
	free(route);
}
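
/*
 * Saturating 32-bit helpers: any sum or product that would exceed
 * UINT32_MAX is clamped to UINT32_MAX instead of wrapping around, e.g.
 * safe_mul_uint32(0x10000, 0x10000) returns 0xffffffff.  This keeps the
 * composite metric arithmetic below from silently overflowing.
 */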

static uint32_t
safe_sum_uint32(uint32_t a, uint32_t b)
{
	uint64_t	total;

	total = (uint64_t) a + (uint64_t) b;

	if (total >> 32)
		return ((uint32_t )(~0));

	return ((uint32_t) total);
}

static uint32_t
safe_mul_uint32(uint32_t a, uint32_t b)
{
	uint64_t	total;

	total = (uint64_t) a * (uint64_t) b;

	if (total >> 32)
		return ((uint32_t )(~0));

	return ((uint32_t) total);
}

uint32_t
eigrp_composite_delay(uint32_t delay)
{
	/* cheap overflow protection */
	delay = min(delay, (1 << 24) - 1);
	return (delay * EIGRP_SCALING_FACTOR);
}

uint32_t
eigrp_real_delay(uint32_t delay)
{
	return (delay / EIGRP_SCALING_FACTOR);
}

uint32_t
eigrp_composite_bandwidth(uint32_t bandwidth)
{
	/* truncate before applying the scaling factor */
	bandwidth = 10000000 / bandwidth;
	return (EIGRP_SCALING_FACTOR * bandwidth);
}

uint32_t
eigrp_real_bandwidth(uint32_t bandwidth)
{
	/*
	 * apply the scaling factor before the division and only then truncate.
	 * this is to keep consistent with what cisco does.
	 */
	return ((EIGRP_SCALING_FACTOR * (uint32_t)10000000) / bandwidth);
}
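
/*
 * Classic EIGRP composite metric, as computed below from the K-values and
 * the real (unscaled) delay and bandwidth, load and reliability:
 *
 *   metric = [K1*(10^7/bw) + (K2*(10^7/bw))/(256 - load) + K3*delay]
 *            * EIGRP_SCALING_FACTOR
 *
 * and, only when K5 is non-zero, the result is further multiplied by
 * K5/(reliability + K4).  Every term saturates at UINT32_MAX.
 */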

static uint32_t
route_composite_metric(uint8_t *kvalues, uint32_t delay, uint32_t bandwidth,
    uint8_t load, uint8_t reliability)
{
	uint64_t	 distance;
	uint32_t	 operand1, operand2, operand3;
	double		 operand4;

	/*
	 * Need to apply the scaling factor before any division to avoid
	 * losing information from truncation.
	 */
	operand1 = safe_mul_uint32(kvalues[0] * EIGRP_SCALING_FACTOR,
	    10000000 / bandwidth);
	operand2 = safe_mul_uint32(kvalues[1] * EIGRP_SCALING_FACTOR,
	    10000000 / bandwidth) / (256 - load);
	operand3 = safe_mul_uint32(kvalues[2] * EIGRP_SCALING_FACTOR, delay);

	distance = (uint64_t) operand1 + (uint64_t) operand2 +
	    (uint64_t) operand3;

	/* if K5 is set to zero, the last term of the formula is not used */
	if (kvalues[4] != 0) {
		operand4 = (double) kvalues[4] / (reliability + kvalues[3]);
		/* no risk of overflow (64 bits), operand4 can be at most 255 */
		distance *= operand4;
	}

	/* overflow protection */
	if (distance >> 32)
		distance = ((uint32_t )(~0));

	return ((uint32_t) distance);
}

static void
route_update_metrics(struct eigrp *eigrp, struct eigrp_route *route,
    struct rinfo *ri)
{
	struct eigrp_iface	*ei = route->nbr->ei;
	uint32_t		 delay, bandwidth;
	int			 mtu;

	route->metric = ri->metric;
	route->emetric = ri->emetric;
	route->flags |= F_EIGRP_ROUTE_M_CHANGED;

	delay = eigrp_real_delay(route->metric.delay);
	bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);

	if (route->nbr->flags & F_RDE_NBR_SELF)
		route->rdistance = 0;
	else {
		route->rdistance = route_composite_metric(eigrp->kvalues,
		    delay, bandwidth, route->metric.load,
		    route->metric.reliability);

		/* update the delay */
		delay = safe_sum_uint32(delay, ei->delay);
		route->metric.delay = eigrp_composite_delay(delay);

		/* update the bandwidth */
		bandwidth = min(bandwidth, ei->bandwidth);
		route->metric.bandwidth = eigrp_composite_bandwidth(bandwidth);

		/* update the mtu */
		mtu = min(metric_decode_mtu(route->metric.mtu), ei->iface->mtu);
		metric_encode_mtu(route->metric.mtu, mtu);

		/* update the hop count */
		if (route->metric.hop_count < UINT8_MAX)
			route->metric.hop_count++;
	}

	route->distance = route_composite_metric(eigrp->kvalues, delay,
	    bandwidth, DEFAULT_LOAD, DEFAULT_RELIABILITY);
}
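
/*
 * While a prefix is active, one reply_node is kept for every neighbor a
 * QUERY was sent to (see rde_send_query_all()).  Each node is linked on
 * both the rt_node and the rde_nbr; the prefix can only return to the
 * passive state once the rt_node's list is empty (see rde_last_reply()).
 */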

static void
reply_outstanding_add(struct rt_node *rn, struct rde_nbr *nbr)
{
	struct reply_node	*reply;

	if ((reply = calloc(1, sizeof(*reply))) == NULL)
		fatal("reply_outstanding_add");

	evtimer_set(&reply->ev_active_timeout, reply_active_timer, reply);
	evtimer_set(&reply->ev_sia_timeout, reply_sia_timer, reply);
	reply->siaquery_sent = 0;
	reply->siareply_recv = 0;
	reply->rn = rn;
	reply->nbr = nbr;
	TAILQ_INSERT_TAIL(&rn->rijk, reply, rn_entry);
	TAILQ_INSERT_TAIL(&nbr->rijk, reply, nbr_entry);

	if (rn->eigrp->active_timeout > 0) {
		reply_active_start_timer(reply);
		reply_sia_start_timer(reply);
	}
}

static struct reply_node *
reply_outstanding_find(struct rt_node *rn, struct rde_nbr *nbr)
{
	struct reply_node	*reply;

	TAILQ_FOREACH(reply, &rn->rijk, rn_entry)
		if (reply->nbr == nbr)
			return (reply);

	return (NULL);
}

static void
reply_outstanding_remove(struct reply_node *reply)
{
	reply_active_stop_timer(reply);
	reply_sia_stop_timer(reply);
	TAILQ_REMOVE(&reply->rn->rijk, reply, rn_entry);
	TAILQ_REMOVE(&reply->nbr->rijk, reply, nbr_entry);
	free(reply);
}

/* ARGSUSED */
static void
reply_active_timer(int fd, short event, void *arg)
{
	struct reply_node	*reply = arg;
	struct rde_nbr		*nbr = reply->nbr;

	log_debug("%s: neighbor %s is stuck in active", __func__,
	    log_addr(nbr->eigrp->af, &nbr->addr));

	rde_nbr_del(reply->nbr, 1);
}

static void
reply_active_start_timer(struct reply_node *reply)
{
	struct eigrp		*eigrp = reply->nbr->eigrp;
	struct timeval		 tv;

	timerclear(&tv);
	tv.tv_sec = eigrp->active_timeout * 60;
	if (evtimer_add(&reply->ev_active_timeout, &tv) == -1)
		fatal("reply_active_start_timer");
}

static void
reply_active_stop_timer(struct reply_node *reply)
{
	if (evtimer_pending(&reply->ev_active_timeout, NULL) &&
	    evtimer_del(&reply->ev_active_timeout) == -1)
		fatal("reply_active_stop_timer");
}
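
/*
 * Stuck-in-active handling: the active timeout is configured in minutes
 * and the SIA timer fires at half that period.  Up to three SIA-QUERY
 * packets are sent per destination; a neighbor that fails to answer with
 * an SIA-REPLY (or lets the active timeout expire) is torn down via
 * rde_nbr_del().  With an active-timeout of zero neither timer is armed.
 */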

/* ARGSUSED */
static void
reply_sia_timer(int fd, short event, void *arg)
{
	struct reply_node	*reply = arg;
	struct rde_nbr		*nbr = reply->nbr;
	struct rt_node		*rn = reply->rn;
	struct rinfo		 ri;

	log_debug("%s: nbr %s prefix %s", __func__, log_addr(nbr->eigrp->af,
	    &nbr->addr), log_prefix(rn));

	if (reply->siaquery_sent > 0 && reply->siareply_recv == 0) {
		log_debug("%s: neighbor %s is stuck in active", __func__,
		    log_addr(nbr->eigrp->af, &nbr->addr));
		rde_nbr_del(nbr, 1);
		return;
	}

	/*
	 * draft-savage-eigrp-04 - Section 4.4.1.1:
	 * "Up to three SIA-QUERY packets for a specific destination may
	 * be sent, each at a value of one-half the ACTIVE time, so long
	 * as each are successfully acknowledged and met with an SIA-REPLY".
	 */
	if (reply->siaquery_sent >= 3)
		return;

	reply->siaquery_sent++;
	reply->siareply_recv = 0;

	/* restart sia and active timeouts */
	reply_sia_start_timer(reply);
	reply_active_start_timer(reply);

	/* send an sia-query */
	rinfo_fill_successor(rn, &ri);
	ri.metric.flags |= F_METRIC_ACTIVE;
	rde_send_siaquery(nbr, &ri);
}

static void
reply_sia_start_timer(struct reply_node *reply)
{
	struct eigrp		*eigrp = reply->nbr->eigrp;
	struct timeval		 tv;

	/*
	 * draft-savage-eigrp-04 - Section 4.4.1.1:
	 * "The SIA-QUERY packet SHOULD be sent on a per-destination basis
	 * at one-half of the ACTIVE timeout period."
	 */
	timerclear(&tv);
	tv.tv_sec = (eigrp->active_timeout * 60) / 2;
	if (evtimer_add(&reply->ev_sia_timeout, &tv) == -1)
		fatal("reply_sia_start_timer");
}

static void
reply_sia_stop_timer(struct reply_node *reply)
{
	if (evtimer_pending(&reply->ev_sia_timeout, NULL) &&
	    evtimer_del(&reply->ev_sia_timeout) == -1)
		fatal("reply_sia_stop_timer");
}

void
rinfo_fill_successor(struct rt_node *rn, struct rinfo *ri)
{
	if (rn->successor.nbr == NULL) {
		rinfo_fill_infinite(rn, EIGRP_ROUTE_INTERNAL, ri);
		return;
	}

	memset(ri, 0, sizeof(*ri));
	ri->af = rn->eigrp->af;
	ri->type = rn->successor.type;
	ri->prefix = rn->prefix;
	ri->prefixlen = rn->prefixlen;
	ri->metric = rn->successor.metric;
	if (ri->type == EIGRP_ROUTE_EXTERNAL)
		ri->emetric = rn->successor.emetric;
}

static void
rinfo_fill_infinite(struct rt_node *rn, enum route_type type, struct rinfo *ri)
{
	memset(ri, 0, sizeof(*ri));
	ri->af = rn->eigrp->af;
	ri->type = type;
	ri->prefix = rn->prefix;
	ri->prefixlen = rn->prefixlen;
	ri->metric.delay = EIGRP_INFINITE_METRIC;
}
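
/*
 * FIB synchronization rules: while a prefix is passive, the successor and
 * any feasible successor whose total distance is within "variance" times
 * the feasible distance may be installed, up to "maximum-paths" routes;
 * everything else is withdrawn.  While the prefix is active all installed
 * routes are withdrawn, and when the successor is an attached network the
 * FIB is left untouched.
 */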

static void
rt_update_fib(struct rt_node *rn)
{
	struct eigrp		*eigrp = rn->eigrp;
	uint8_t			 maximum_paths = eigrp->maximum_paths;
	uint8_t			 variance = eigrp->variance;
	int			 installed = 0;
	struct eigrp_route	*route;

	if (rn->state == DUAL_STA_PASSIVE) {
		/* no multipath for attached networks. */
		if (rn->successor.nbr &&
		    (rn->successor.nbr->flags & F_RDE_NBR_LOCAL))
			return;

		TAILQ_FOREACH(route, &rn->routes, entry) {
			/* skip redistributed routes */
			if (route->nbr->flags & F_RDE_NBR_REDIST)
				continue;

			/*
			 * Only feasible successors and the successor itself
			 * are eligible to be installed.
			 */
			if (route->rdistance >= rn->successor.fdistance)
				goto uninstall;

			if (route->distance >
			    (rn->successor.fdistance * variance))
				goto uninstall;

			if (installed >= maximum_paths)
				goto uninstall;

			installed++;

			if ((route->flags & F_EIGRP_ROUTE_INSTALLED) &&
			    !(route->flags & F_EIGRP_ROUTE_M_CHANGED))
				continue;

			rde_send_change_kroute(rn, route);
			continue;

uninstall:
			if (route->flags & F_EIGRP_ROUTE_INSTALLED)
				rde_send_delete_kroute(rn, route);
		}
	} else {
		TAILQ_FOREACH(route, &rn->routes, entry)
			if (route->flags & F_EIGRP_ROUTE_INSTALLED)
				rde_send_delete_kroute(rn, route);
	}
}

static void
rt_set_successor(struct rt_node *rn, struct eigrp_route *successor)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_iface	*ei;
	struct summary_addr	*summary;

	if (successor == NULL) {
		rn->successor.nbr = NULL;
		rn->successor.type = 0;
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		rn->successor.rdistance = EIGRP_INFINITE_METRIC;
		memset(&rn->successor.metric, 0,
		    sizeof(rn->successor.metric));
		rn->successor.metric.delay = EIGRP_INFINITE_METRIC;
		memset(&rn->successor.emetric, 0,
		    sizeof(rn->successor.emetric));
	} else {
		rn->successor.nbr = successor->nbr;
		rn->successor.type = successor->type;
		rn->successor.fdistance = successor->distance;
		rn->successor.rdistance = successor->rdistance;
		rn->successor.metric = successor->metric;
		rn->successor.emetric = successor->emetric;
	}

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		summary = rde_summary_check(ei, &rn->prefix, rn->prefixlen);
		if (summary)
			rt_summary_set(eigrp, summary, &rn->successor.metric);
	}
}
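
/*
 * Feasibility condition: a path can only be picked as the successor if
 * the distance reported by the neighbor is strictly smaller than the
 * current feasible distance (rn->successor.fdistance), which is what
 * guarantees a loop-free choice; among the candidates that pass this
 * check the one with the lowest total distance wins.
 */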

static struct eigrp_route *
rt_get_successor_fc(struct rt_node *rn)
{
	struct eigrp_route	*route, *successor = NULL;
	uint32_t		 distance = EIGRP_INFINITE_METRIC;
	int			 external_only = 1;

	TAILQ_FOREACH(route, &rn->routes, entry)
		if (route->type == EIGRP_ROUTE_INTERNAL) {
			/*
			 * connected routes should always be preferred over
			 * received routes independent of the metric.
			 */
			if (route->nbr->flags & F_RDE_NBR_LOCAL)
				return (route);

			external_only = 0;
		}

	TAILQ_FOREACH(route, &rn->routes, entry) {
		/*
		 * draft-savage-eigrp-04 - Section 5.4.7:
		 * "Internal routes MUST be prefered over external routes
		 * independent of the metric."
		 */
		if (route->type == EIGRP_ROUTE_EXTERNAL && !external_only)
			continue;

		/* pick the best route that meets the feasibility condition */
		if (route->rdistance < rn->successor.fdistance &&
		    route->distance < distance) {
			distance = route->distance;
			successor = route;
		}
	}

	return (successor);
}

struct summary_addr *
rde_summary_check(struct eigrp_iface *ei, union eigrpd_addr *prefix,
    uint8_t prefixlen)
{
	struct summary_addr	*summary;

	TAILQ_FOREACH(summary, &ei->summary_list, entry) {
		/* do not filter the summary itself */
		if (summary->prefixlen == prefixlen &&
		    !eigrp_addrcmp(ei->eigrp->af, prefix, &summary->prefix))
			return (NULL);

		if (summary->prefixlen <= prefixlen &&
		    !eigrp_prefixcmp(ei->eigrp->af, prefix, &summary->prefix,
		    summary->prefixlen))
			return (summary);
	}

	return (NULL);
}

static void
rde_send_update(struct eigrp_iface *ei, struct rinfo *ri)
{
	if (ri->metric.hop_count >= ei->eigrp->maximum_hops ||
	    rde_summary_check(ei, &ri->prefix, ri->prefixlen))
		ri->metric.delay = EIGRP_INFINITE_METRIC;

	rde_imsg_compose_eigrpe(IMSG_SEND_MUPDATE, ei->ifaceid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(IMSG_SEND_MUPDATE_END, ei->ifaceid, 0,
	    NULL, 0);
}

static void
rde_send_update_all(struct rt_node *rn, struct rinfo *ri)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_iface	*ei;

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		/* respect split-horizon configuration */
		if (rn->successor.nbr && rn->successor.nbr->ei == ei &&
		    ei->splithorizon)
			continue;
		rde_send_update(ei, ri);
	}
}

static void
rde_send_query(struct eigrp_iface *ei, struct rinfo *ri, int push)
{
	rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY, ei->ifaceid, 0,
	    ri, sizeof(*ri));
	if (push)
		rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY_END, ei->ifaceid,
		    0, NULL, 0);
}

static void
rde_send_siaquery(struct rde_nbr *nbr, struct rinfo *ri)
{
	rde_imsg_compose_eigrpe(IMSG_SEND_QUERY, nbr->peerid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(IMSG_SEND_SIAQUERY_END, nbr->peerid, 0,
	    NULL, 0);
}

static void
rde_send_query_all(struct eigrp *eigrp, struct rt_node *rn, int push)
{
	struct eigrp_iface	*ei;
	struct rde_nbr		*nbr;
	struct rinfo		 ri;

	rinfo_fill_successor(rn, &ri);
	ri.metric.flags |= F_METRIC_ACTIVE;

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		/* respect split-horizon configuration */
		if (rn->successor.nbr && rn->successor.nbr->ei == ei &&
		    ei->splithorizon)
			continue;

		rde_send_query(ei, &ri, push);
	}

	RB_FOREACH(nbr, rde_nbr_head, &rde_nbrs)
		if (nbr->ei->eigrp == eigrp && !(nbr->flags & F_RDE_NBR_SELF)) {
			/* respect split-horizon configuration */
			if (rn->successor.nbr &&
			    rn->successor.nbr->ei == nbr->ei &&
			    nbr->ei->splithorizon)
				continue;

			reply_outstanding_add(rn, nbr);
		}
}

void
rde_flush_queries(void)
{
	struct eigrp		*eigrp;
	struct eigrp_iface	*ei;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry)
			rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY_END,
			    ei->ifaceid, 0, NULL, 0);
}

static void
rde_send_reply(struct rde_nbr *nbr, struct rinfo *ri, int siareply)
{
	int	 type;

	if (ri->metric.hop_count >= nbr->eigrp->maximum_hops ||
	    rde_summary_check(nbr->ei, &ri->prefix, ri->prefixlen))
		ri->metric.delay = EIGRP_INFINITE_METRIC;

	if (!siareply)
		type = IMSG_SEND_REPLY_END;
	else
		type = IMSG_SEND_SIAREPLY_END;

	rde_imsg_compose_eigrpe(IMSG_SEND_REPLY, nbr->peerid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(type, nbr->peerid, 0, NULL, 0);
}
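
/*
 * Process a received UPDATE: look up (or create) the rt_node, then delete
 * or refresh this neighbor's path depending on whether the advertised
 * delay is infinite.  In the passive state the successor is recomputed;
 * if no path satisfies the feasibility condition the prefix goes active
 * (DUAL event 4) and queries are sent to all neighbors, otherwise the FIB
 * is updated and, if the feasible distance changed, the new successor is
 * advertised.
 */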

void
rde_check_update(struct rde_nbr *nbr, struct rinfo *ri)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route, *successor;
	uint32_t		 old_fdistance;
	struct rinfo		 sri;

	rn = rt_find(eigrp, ri);
	if (rn == NULL) {
		if (ri->metric.delay == EIGRP_INFINITE_METRIC)
			return;

		rn = rt_new(eigrp, ri);
		route = route_new(rn, nbr, ri);

		old_fdistance = EIGRP_INFINITE_METRIC;
	} else {
		old_fdistance = rn->successor.fdistance;

		if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
			route = route_find(nbr, rn);
			if (route)
				route_del(rn, route);
		} else {
			route = route_find(nbr, rn);
			if (route == NULL)
				route = route_new(rn, nbr, ri);
			else
				route_update_metrics(eigrp, route, ri);
		}
	}

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 1);

			dual_fsm(rn, DUAL_EVT_4);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send update with new metric if necessary */
			rinfo_fill_successor(rn, &sri);
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &sri);
		}
		break;
	case DUAL_STA_ACTIVE1:
		/* XXX event 9 if cost increase? */
		break;
	case DUAL_STA_ACTIVE3:
		/* XXX event 10 if cost increase? */
		break;
	}

	if ((rn->state & DUAL_STA_ACTIVE_ALL) && TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);
}

void
rde_check_query(struct rde_nbr *nbr, struct rinfo *ri, int siaquery)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route, *successor;
	uint32_t		 old_fdistance;
	struct rinfo		 sri;
	int			 reply_sent = 0;

	/*
	 * draft-savage-eigrp-02 - Section 4.3:
	 * "When a query is received for a route that doesn't exist in our
	 * topology table, a reply with infinite metric is sent and an entry
	 * in the topology table is added with the metric in the QUERY if
	 * the metric is not an infinite value".
	 */
	rn = rt_find(eigrp, ri);
	if (rn == NULL) {
		sri = *ri;
		sri.metric.delay = EIGRP_INFINITE_METRIC;
		rde_send_reply(nbr, &sri, 0);

		if (ri->metric.delay == EIGRP_INFINITE_METRIC)
			return;

		rn = rt_new(eigrp, ri);
		route = route_new(rn, nbr, ri);
		rt_set_successor(rn, route);
		return;
	}

	old_fdistance = rn->successor.fdistance;

	if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
		route = route_find(nbr, rn);
		if (route)
			route_del(rn, route);
	} else {
		route = route_find(nbr, rn);
		if (route == NULL)
			route = route_new(rn, nbr, ri);
		else
			route_update_metrics(eigrp, route, ri);
	}

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_3);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send reply */
			rinfo_fill_successor(rn, &sri);
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;

			/* send update with new metric if necessary */
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &sri);
		}
		break;
	case DUAL_STA_ACTIVE0:
	case DUAL_STA_ACTIVE1:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_5);
		else {
			dual_fsm(rn, DUAL_EVT_6);
			rinfo_fill_successor(rn, &sri);
			sri.metric.flags |= F_METRIC_ACTIVE;
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;
		}
		break;
	case DUAL_STA_ACTIVE2:
	case DUAL_STA_ACTIVE3:
		if (nbr == rn->successor.nbr) {
			/* XXX not defined in the spec, do nothing? */
		} else {
			dual_fsm(rn, DUAL_EVT_6);
			rinfo_fill_successor(rn, &sri);
			sri.metric.flags |= F_METRIC_ACTIVE;
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;
		}
		break;
	}

	if ((rn->state & DUAL_STA_ACTIVE_ALL) && TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);

	if (siaquery && !reply_sent) {
		rinfo_fill_successor(rn, &sri);
		sri.metric.flags |= F_METRIC_ACTIVE;
		rde_send_reply(nbr, &sri, 1);
	}
}

static void
rde_last_reply(struct rt_node *rn)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_route	*successor;
	struct rde_nbr		*old_successor;
	uint32_t		 old_fdistance;
	struct rinfo		 ri;

	old_successor = rn->successor.nbr;
	old_fdistance = rn->successor.fdistance;

	switch (rn->state) {
	case DUAL_STA_ACTIVE0:
		successor = rt_get_successor_fc(rn);
		if (successor == NULL) {
			/* feasibility condition is not met */
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_11);
			break;
		}

		/* update successor - feasibility condition is met */
		rt_set_successor(rn, successor);

		/* advertise new successor to neighbors */
		rinfo_fill_successor(rn, &ri);
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_14);
		break;
	case DUAL_STA_ACTIVE1:
		/* update successor */
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		successor = rt_get_successor_fc(rn);
		rt_set_successor(rn, successor);

		/* advertise new successor to neighbors */
		rinfo_fill_successor(rn, &ri);
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_15);
		break;
	case DUAL_STA_ACTIVE2:
		successor = rt_get_successor_fc(rn);
		if (successor == NULL) {
			/* feasibility condition is not met */
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_12);
			break;
		}

		/* update successor - feasibility condition is met */
		rt_set_successor(rn, successor);

		/* send a reply to the old successor */
		rinfo_fill_successor(rn, &ri);
		ri.metric.flags |= F_METRIC_ACTIVE;
		if (old_successor)
			rde_send_reply(old_successor, &ri, 0);

		/* advertise new successor to neighbors */
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_16);
		break;
	case DUAL_STA_ACTIVE3:
		/* update successor */
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		successor = rt_get_successor_fc(rn);
		rt_set_successor(rn, successor);

		/* send a reply to the old successor */
		rinfo_fill_successor(rn, &ri);
		ri.metric.flags |= F_METRIC_ACTIVE;
		if (old_successor)
			rde_send_reply(old_successor, &ri, 0);

		/* advertise new successor to neighbors */
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_13);
		break;
	}

	if (rn->state == DUAL_STA_PASSIVE && rn->successor.nbr == NULL)
		rt_del(rn);
}

void
rde_check_reply(struct rde_nbr *nbr, struct rinfo *ri, int siareply)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct reply_node	*reply;
	struct eigrp_route	*route;

	rn = rt_find(eigrp, ri);
	if (rn == NULL)
		return;

	/* XXX ignore reply when the state is passive? */
	if (rn->state == DUAL_STA_PASSIVE)
		return;

	reply = reply_outstanding_find(rn, nbr);
	if (reply == NULL)
		return;

	if (siareply) {
		reply->siareply_recv = 1;
		reply_active_start_timer(reply);
		return;
	}

	if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
		route = route_find(nbr, rn);
		if (route)
			route_del(rn, route);
	} else {
		route = route_find(nbr, rn);
		if (route == NULL)
			route = route_new(rn, nbr, ri);
		else
			route_update_metrics(eigrp, route, ri);
	}

	reply_outstanding_remove(reply);
	if (TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);
}

void
rde_check_link_down_rn(struct rde_nbr *nbr, struct rt_node *rn,
    struct eigrp_route *route)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct reply_node	*reply;
	struct eigrp_route	*successor;
	uint32_t		 old_fdistance;
	struct rinfo		 ri;
	enum route_type		 type;

	old_fdistance = rn->successor.fdistance;

	type = route->type;
	route_del(rn, route);

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 0);

			dual_fsm(rn, DUAL_EVT_4);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send update with new metric if necessary */
			rinfo_fill_successor(rn, &ri);
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &ri);
		}
		break;
	case DUAL_STA_ACTIVE1:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_9);
		break;
	case DUAL_STA_ACTIVE3:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_10);
		break;
	}

	if (rn->state & DUAL_STA_ACTIVE_ALL) {
		reply = reply_outstanding_find(rn, nbr);
		if (reply) {
			rinfo_fill_infinite(rn, type, &ri);
			rde_check_reply(nbr, &ri, 0);
		}
	}
}

void
rde_check_link_down_nbr(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn, *safe;
	struct eigrp_route	*route;

	RB_FOREACH_SAFE(rn, rt_tree, &eigrp->topology, safe) {
		route = route_find(nbr, rn);
		if (route) {
			rde_check_link_down_rn(nbr, rn, route);
			if (rn->successor.nbr == nbr)
				rn->successor.nbr = NULL;
		}
	}
}

void
rde_check_link_down(unsigned int ifindex)
{
	struct rde_nbr		*nbr;

	RB_FOREACH(nbr, rde_nbr_head, &rde_nbrs)
		if (nbr->ei->iface->ifindex == ifindex)
			rde_check_link_down_nbr(nbr);

	rde_flush_queries();
}

void
rde_check_link_cost_change(struct rde_nbr *nbr, struct eigrp_iface *ei)
{
}

static __inline int
rde_nbr_compare(struct rde_nbr *a, struct rde_nbr *b)
{
	return (a->peerid - b->peerid);
}

struct rde_nbr *
rde_nbr_find(uint32_t peerid)
{
	struct rde_nbr		 n;

	n.peerid = peerid;

	return (RB_FIND(rde_nbr_head, &rde_nbrs, &n));
}

struct rde_nbr *
rde_nbr_new(uint32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr		*nbr;

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	nbr->peerid = peerid;
	nbr->ifaceid = new->ifaceid;
	nbr->addr = new->addr;
	nbr->ei = eigrp_if_lookup_id(nbr->ifaceid);
	if (nbr->ei)
		nbr->eigrp = nbr->ei->eigrp;
	TAILQ_INIT(&nbr->rijk);
	nbr->flags = new->flags;

	if (nbr->peerid != NBR_IDSELF &&
	    RB_INSERT(rde_nbr_head, &rde_nbrs, nbr) != NULL)
		fatalx("rde_nbr_new: RB_INSERT failed");

	return (nbr);
}

void
rde_nbr_del(struct rde_nbr *nbr, int peerterm)
{
	struct reply_node	*reply;

	if (peerterm)
		rde_imsg_compose_eigrpe(IMSG_NEIGHBOR_DOWN, nbr->peerid,
		    0, NULL, 0);

	while ((reply = TAILQ_FIRST(&nbr->rijk)) != NULL)
		reply_outstanding_remove(reply);

	if (nbr->peerid != NBR_IDSELF)
		RB_REMOVE(rde_nbr_head, &rde_nbrs, nbr);
	free(nbr);
}