1 : /* $OpenBSD: hfsc.c,v 1.47 2018/04/13 14:09:42 mikeb Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2012-2013 Henning Brauer <henning@openbsd.org>
5 : * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
6 : *
7 : * Permission to use, copy, modify, and distribute this software and
8 : * its documentation is hereby granted (including for commercial or
9 : * for-profit use), provided that both the copyright notice and this
10 : * permission notice appear in all copies of the software, derivative
11 : * works, or modified versions, and any portions thereof.
12 : *
13 : * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
14 : * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
15 : * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
16 : * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 : * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
19 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 : * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 : * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 : * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 : * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 : * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 : * DAMAGE.
27 : *
28 : * Carnegie Mellon encourages (but does not require) users of this
29 : * software to return any improvements or extensions that they make,
30 : * and to grant Carnegie Mellon the rights to redistribute these
31 : * changes without encumbrance.
32 : */
33 : /*
34 : * H-FSC is described in Proceedings of SIGCOMM'97,
35 : * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
36 : * Real-Time and Priority Service"
37 : * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
38 : *
39 : * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
40 : * when a class has an upperlimit, the fit-time is computed from the
41 : * upperlimit service curve. the link-sharing scheduler does not schedule
42 : * a class whose fit-time exceeds the current time.
43 : */
44 :
45 : #include <sys/param.h>
46 : #include <sys/malloc.h>
47 : #include <sys/pool.h>
48 : #include <sys/mbuf.h>
49 : #include <sys/socket.h>
50 : #include <sys/systm.h>
51 : #include <sys/errno.h>
52 : #include <sys/queue.h>
53 : #include <sys/kernel.h>
54 : #include <sys/timeout.h>
55 :
56 : #include <net/if.h>
57 : #include <net/if_var.h>
58 : #include <netinet/in.h>
59 :
60 : #include <net/pfvar.h>
61 : #include <net/hfsc.h>
62 :
63 : /*
64 : * kernel internal service curve representation
65 : * coordinates are given by 64 bit unsigned integers.
66 : * x-axis: unit is clock count. this implementation uses the
67 : * scaled system uptime returned by hfsc_microuptime() below.
68 : * virtual time is also calculated in this time scale.
69 : * y-axis: unit is byte.
70 : *
71 : * the service curve parameters are converted to the internal
72 : * representation.
73 : * the slope values are scaled to avoid overflow.
74 : * the inverse slope values as well as the y-projection of the 1st
75 : * segment are kept in order to avoid 64-bit divide operations
76 : * that are expensive on 32-bit architectures.
77 : *
78 : * note: the 64-bit time scale does not wrap around in any realistic timeframe.
79 : * x-axis doesn't wrap around for 1089 years with 1GHz clock.
80 : * y-axis doesn't wrap around for 4358 years with 1Gbps bandwidth.
81 : */
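/*
 * illustrative summary of the representation above: a service curve
 * (m1, d, m2) becomes two linear segments, slope sm1 for dx clock
 * units and slope sm2 afterwards, with dy the bytes accumulated over
 * the first segment.  the inverse slopes ism1/ism2 are precomputed so
 * that y-to-x lookups can use the same shift-and-multiply trick as
 * x-to-y lookups instead of a 64-bit divide.
 */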
82 :
83 : /* kernel internal representation of a service curve */
84 : struct hfsc_internal_sc {
85 : u_int64_t sm1; /* scaled slope of the 1st segment */
86 : u_int64_t ism1; /* scaled inverse-slope of the 1st segment */
87 : u_int64_t dx; /* the x-projection of the 1st segment */
88 : u_int64_t dy; /* the y-projection of the 1st segment */
89 : u_int64_t sm2; /* scaled slope of the 2nd segment */
90 : u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
91 : };
92 :
93 : /* runtime service curve */
94 : struct hfsc_runtime_sc {
95 : u_int64_t x; /* current starting position on x-axis */
96 : u_int64_t y; /* current starting position on y-axis */
97 : u_int64_t sm1; /* scaled slope of the 1st segment */
98 : u_int64_t ism1; /* scaled inverse-slope of the 1st segment */
99 : u_int64_t dx; /* the x-projection of the 1st segment */
100 : u_int64_t dy; /* the y-projection of the 1st segment */
101 : u_int64_t sm2; /* scaled slope of the 2nd segment */
102 : u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
103 : };
104 :
105 : struct hfsc_classq {
106 : struct mbuf_list q; /* Queue of packets */
107 : int qlimit; /* Queue limit */
108 : };
109 :
110 : /* for TAILQ based ellist and actlist implementation */
111 : struct hfsc_class;
112 : TAILQ_HEAD(hfsc_eligible, hfsc_class);
113 : TAILQ_HEAD(hfsc_active, hfsc_class);
114 : #define hfsc_actlist_last(s) TAILQ_LAST(s, hfsc_active)
115 :
116 : struct hfsc_class {
117 : u_int cl_id; /* class id (just for debug) */
118 : u_int32_t cl_handle; /* class handle */
119 : int cl_flags; /* misc flags */
120 :
121 : struct hfsc_class *cl_parent; /* parent class */
122 : struct hfsc_class *cl_siblings; /* sibling classes */
123 : struct hfsc_class *cl_children; /* child classes */
124 :
125 : struct hfsc_classq cl_q; /* class queue structure */
126 :
127 : const struct pfq_ops *cl_qops; /* queue manager */
128 : void *cl_qdata; /* queue manager data */
129 : void *cl_cookie; /* queue manager cookie */
130 :
131 : u_int64_t cl_total; /* total work in bytes */
132 : u_int64_t cl_cumul; /* cumulative work in bytes
133 : done by real-time criteria */
134 : u_int64_t cl_d; /* deadline */
135 : u_int64_t cl_e; /* eligible time */
136 : u_int64_t cl_vt; /* virtual time */
137 : u_int64_t cl_f; /* time when this class will fit for
138 : link-sharing, max(myf, cfmin) */
139 : u_int64_t cl_myf; /* my fit-time (as calculated from this
140 : class's own upperlimit curve) */
141 : u_int64_t cl_myfadj; /* my fit-time adjustment
142 : (to cancel history dependence) */
143 : u_int64_t cl_cfmin; /* earliest children's fit-time (used
144 : with cl_myf to obtain cl_f) */
145 : u_int64_t cl_cvtmin; /* minimal virtual time among the
146 : children fit for link-sharing
147 : (monotonic within a period) */
148 : u_int64_t cl_vtadj; /* intra-period cumulative vt
149 : adjustment */
150 : u_int64_t cl_vtoff; /* inter-period cumulative vt offset */
151 : u_int64_t cl_cvtmax; /* max child's vt in the last period */
152 :
153 : u_int64_t cl_initvt; /* init virtual time (for debugging) */
154 :
155 : struct hfsc_internal_sc *cl_rsc; /* internal real-time service curve */
156 : struct hfsc_internal_sc *cl_fsc; /* internal fair service curve */
157 : struct hfsc_internal_sc *cl_usc; /* internal upperlimit service curve */
158 : struct hfsc_runtime_sc cl_deadline; /* deadline curve */
159 : struct hfsc_runtime_sc cl_eligible; /* eligible curve */
160 : struct hfsc_runtime_sc cl_virtual; /* virtual curve */
161 : struct hfsc_runtime_sc cl_ulimit; /* upperlimit curve */
162 :
163 : u_int cl_vtperiod; /* vt period sequence no */
164 : u_int cl_parentperiod; /* parent's vt period seqno */
165 : int cl_nactive; /* number of active children */
166 : struct hfsc_active cl_actc; /* active children list */
167 :
168 : TAILQ_ENTRY(hfsc_class) cl_actlist; /* active children list entry */
169 : TAILQ_ENTRY(hfsc_class) cl_ellist; /* eligible list entry */
170 :
171 : struct {
172 : struct hfsc_pktcntr xmit_cnt;
173 : struct hfsc_pktcntr drop_cnt;
174 : u_int period;
175 : } cl_stats;
176 : };
177 :
178 : /*
179 : * hfsc interface state
180 : */
181 : struct hfsc_if {
182 : struct hfsc_if *hif_next; /* interface state list */
183 : struct hfsc_class *hif_rootclass; /* root class */
184 : struct hfsc_class *hif_defaultclass; /* default class */
185 : struct hfsc_class **hif_class_tbl;
186 :
187 : u_int64_t hif_microtime; /* time at deq_begin */
188 :
189 : u_int hif_allocated; /* # of slots in hif_class_tbl */
190 : u_int hif_classes; /* # of classes in the tree */
191 : u_int hif_classid; /* class id sequence number */
192 :
193 : struct hfsc_eligible hif_eligible; /* eligible list */
194 : struct timeout hif_defer; /* for queues that weren't ready */
195 : };
196 :
197 : /*
198 : * function prototypes
199 : */
200 : struct hfsc_class *hfsc_class_create(struct hfsc_if *,
201 : struct hfsc_sc *, struct hfsc_sc *,
202 : struct hfsc_sc *, struct hfsc_class *, int,
203 : int, int);
204 : int hfsc_class_destroy(struct hfsc_if *,
205 : struct hfsc_class *);
206 : struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
207 :
208 : void hfsc_cl_purge(struct hfsc_if *, struct hfsc_class *,
209 : struct mbuf_list *);
210 :
211 : void hfsc_update_sc(struct hfsc_if *, struct hfsc_class *, int);
212 : void hfsc_deferred(void *);
213 : void hfsc_update_cfmin(struct hfsc_class *);
214 : void hfsc_set_active(struct hfsc_if *, struct hfsc_class *, int);
215 : void hfsc_set_passive(struct hfsc_if *, struct hfsc_class *);
216 : void hfsc_init_ed(struct hfsc_if *, struct hfsc_class *, int);
217 : void hfsc_update_ed(struct hfsc_if *, struct hfsc_class *, int);
218 : void hfsc_update_d(struct hfsc_class *, int);
219 : void hfsc_init_vf(struct hfsc_class *, int);
220 : void hfsc_update_vf(struct hfsc_class *, int, u_int64_t);
221 : void hfsc_ellist_insert(struct hfsc_if *, struct hfsc_class *);
222 : void hfsc_ellist_remove(struct hfsc_if *, struct hfsc_class *);
223 : void hfsc_ellist_update(struct hfsc_if *, struct hfsc_class *);
224 : struct hfsc_class *hfsc_ellist_get_mindl(struct hfsc_if *, u_int64_t);
225 : void hfsc_actlist_insert(struct hfsc_class *);
226 : void hfsc_actlist_remove(struct hfsc_class *);
227 : void hfsc_actlist_update(struct hfsc_class *);
228 :
229 : struct hfsc_class *hfsc_actlist_firstfit(struct hfsc_class *,
230 : u_int64_t);
231 :
232 : static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
233 : static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
234 : static __inline u_int64_t m2sm(u_int);
235 : static __inline u_int64_t m2ism(u_int);
236 : static __inline u_int64_t d2dx(u_int);
237 : static __inline u_int sm2m(u_int64_t);
238 : static __inline u_int dx2d(u_int64_t);
239 :
240 : void hfsc_sc2isc(struct hfsc_sc *, struct hfsc_internal_sc *);
241 : void hfsc_rtsc_init(struct hfsc_runtime_sc *,
242 : struct hfsc_internal_sc *, u_int64_t, u_int64_t);
243 : u_int64_t hfsc_rtsc_y2x(struct hfsc_runtime_sc *, u_int64_t);
244 : u_int64_t hfsc_rtsc_x2y(struct hfsc_runtime_sc *, u_int64_t);
245 : void hfsc_rtsc_min(struct hfsc_runtime_sc *,
246 : struct hfsc_internal_sc *, u_int64_t, u_int64_t);
247 :
248 : void hfsc_getclstats(struct hfsc_class_stats *, struct hfsc_class *);
249 : struct hfsc_class *hfsc_clh2cph(struct hfsc_if *, u_int32_t);
250 :
251 : #define HFSC_CLK_SHIFT 8
252 : #define HFSC_FREQ (1000000 << HFSC_CLK_SHIFT)
253 : #define HFSC_CLK_PER_TICK (HFSC_FREQ / hz)
254 : #define HFSC_HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
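/*
 * a quick sanity check of the clock macros (illustrative arithmetic):
 * HFSC_FREQ = (1000000 << 8) = 256,000,000 units per second, i.e. one
 * unit is 1/256 of a microsecond.  with hz = 100, HFSC_CLK_PER_TICK is
 * 2,560,000 units, the x-axis distance covered by one timeout tick.
 */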
255 :
256 : struct pool hfsc_class_pl, hfsc_internal_sc_pl;
257 :
258 : /*
259 : * ifqueue glue.
260 : */
261 :
262 : unsigned int hfsc_idx(unsigned int, const struct mbuf *);
263 : struct mbuf *hfsc_enq(struct ifqueue *, struct mbuf *);
264 : struct mbuf *hfsc_deq_begin(struct ifqueue *, void **);
265 : void hfsc_deq_commit(struct ifqueue *, struct mbuf *, void *);
266 : void hfsc_purge(struct ifqueue *, struct mbuf_list *);
267 : void *hfsc_alloc(unsigned int, void *);
268 : void hfsc_free(unsigned int, void *);
269 :
270 : const struct ifq_ops hfsc_ops = {
271 : hfsc_idx,
272 : hfsc_enq,
273 : hfsc_deq_begin,
274 : hfsc_deq_commit,
275 : hfsc_purge,
276 : hfsc_alloc,
277 : hfsc_free,
278 : };
279 :
280 : const struct ifq_ops * const ifq_hfsc_ops = &hfsc_ops;
281 :
282 : /*
283 : * pf queue glue.
284 : */
285 :
286 : void *hfsc_pf_alloc(struct ifnet *);
287 : int hfsc_pf_addqueue(void *, struct pf_queuespec *);
288 : void hfsc_pf_free(void *);
289 : int hfsc_pf_qstats(struct pf_queuespec *, void *, int *);
290 : unsigned int hfsc_pf_qlength(void *);
291 : struct mbuf * hfsc_pf_enqueue(void *, struct mbuf *);
292 : struct mbuf * hfsc_pf_deq_begin(void *, void **, struct mbuf_list *);
293 : void hfsc_pf_deq_commit(void *, struct mbuf *, void *);
294 : void hfsc_pf_purge(void *, struct mbuf_list *);
295 :
296 : const struct pfq_ops hfsc_pf_ops = {
297 : hfsc_pf_alloc,
298 : hfsc_pf_addqueue,
299 : hfsc_pf_free,
300 : hfsc_pf_qstats,
301 : hfsc_pf_qlength,
302 : hfsc_pf_enqueue,
303 : hfsc_pf_deq_begin,
304 : hfsc_pf_deq_commit,
305 : hfsc_pf_purge
306 : };
307 :
308 : const struct pfq_ops * const pfq_hfsc_ops = &hfsc_pf_ops;
309 :
310 : /*
311 : * shortcuts for repeated use
312 : */
313 : static inline unsigned int
314 0 : hfsc_class_qlength(struct hfsc_class *cl)
315 : {
316 : /* Only leaf classes have a queue */
317 0 : if (cl->cl_qops != NULL)
318 0 : return cl->cl_qops->pfq_qlength(cl->cl_qdata);
319 0 : return 0;
320 0 : }
321 :
322 : static inline struct mbuf *
323 0 : hfsc_class_enqueue(struct hfsc_class *cl, struct mbuf *m)
324 : {
325 0 : return cl->cl_qops->pfq_enqueue(cl->cl_qdata, m);
326 : }
327 :
328 : static inline struct mbuf *
329 0 : hfsc_class_deq_begin(struct hfsc_class *cl, struct mbuf_list *ml)
330 : {
331 0 : return cl->cl_qops->pfq_deq_begin(cl->cl_qdata, &cl->cl_cookie, ml);
332 : }
333 :
334 : static inline void
335 0 : hfsc_class_deq_commit(struct hfsc_class *cl, struct mbuf *m)
336 : {
337 0 : return cl->cl_qops->pfq_deq_commit(cl->cl_qdata, m, cl->cl_cookie);
338 : }
339 :
340 : static inline void
341 0 : hfsc_class_purge(struct hfsc_class *cl, struct mbuf_list *ml)
342 : {
343 : /* Only leaf classes have a queue */
344 0 : if (cl->cl_qops != NULL)
345 0 : return cl->cl_qops->pfq_purge(cl->cl_qdata, ml);
346 0 : }
347 :
348 : u_int64_t
349 0 : hfsc_microuptime(void)
350 : {
351 0 : struct timeval tv;
352 :
353 0 : microuptime(&tv);
354 0 : return (((u_int64_t)(tv.tv_sec) * 1000000 + tv.tv_usec) <<
355 : HFSC_CLK_SHIFT);
356 0 : }
357 :
358 : static inline u_int
359 0 : hfsc_more_slots(u_int current)
360 : {
361 0 : u_int want = current * 2;
362 :
363 0 : return (want > HFSC_MAX_CLASSES ? HFSC_MAX_CLASSES : want);
364 : }
365 :
366 : static void
367 0 : hfsc_grow_class_tbl(struct hfsc_if *hif, u_int howmany)
368 : {
369 : struct hfsc_class **newtbl, **old;
370 0 : size_t oldlen = sizeof(void *) * hif->hif_allocated;
371 :
372 0 : newtbl = mallocarray(howmany, sizeof(void *), M_DEVBUF,
373 : M_WAITOK | M_ZERO);
374 0 : old = hif->hif_class_tbl;
375 :
376 0 : memcpy(newtbl, old, oldlen);
377 0 : hif->hif_class_tbl = newtbl;
378 0 : hif->hif_allocated = howmany;
379 :
380 0 : free(old, M_DEVBUF, oldlen);
381 0 : }
382 :
383 : void
384 0 : hfsc_initialize(void)
385 : {
386 0 : pool_init(&hfsc_class_pl, sizeof(struct hfsc_class), 0,
387 : IPL_NONE, PR_WAITOK, "hfscclass", NULL);
388 0 : pool_init(&hfsc_internal_sc_pl, sizeof(struct hfsc_internal_sc), 0,
389 : IPL_NONE, PR_WAITOK, "hfscintsc", NULL);
390 0 : }
391 :
392 : void *
393 0 : hfsc_pf_alloc(struct ifnet *ifp)
394 : {
395 : struct hfsc_if *hif;
396 :
397 0 : KASSERT(ifp != NULL);
398 :
399 0 : hif = malloc(sizeof(*hif), M_DEVBUF, M_WAITOK | M_ZERO);
400 0 : TAILQ_INIT(&hif->hif_eligible);
401 0 : hif->hif_class_tbl = mallocarray(HFSC_DEFAULT_CLASSES, sizeof(void *),
402 : M_DEVBUF, M_WAITOK | M_ZERO);
403 0 : hif->hif_allocated = HFSC_DEFAULT_CLASSES;
404 :
405 0 : timeout_set(&hif->hif_defer, hfsc_deferred, ifp);
406 :
407 0 : return (hif);
408 : }
409 :
410 : int
411 0 : hfsc_pf_addqueue(void *arg, struct pf_queuespec *q)
412 : {
413 0 : struct hfsc_if *hif = arg;
414 : struct hfsc_class *cl, *parent, *np = NULL;
415 0 : struct hfsc_sc rtsc, lssc, ulsc;
416 : int error = 0;
417 :
418 0 : KASSERT(hif != NULL);
419 0 : KASSERT(q->qid != 0);
420 :
421 : /* Root queue must have non-zero linksharing parameters */
422 0 : if (q->linkshare.m1.absolute == 0 && q->linkshare.m2.absolute == 0 &&
423 0 : q->parent_qid == 0)
424 0 : return (EINVAL);
425 :
426 0 : if (q->parent_qid == 0 && hif->hif_rootclass == NULL) {
427 0 : np = hfsc_class_create(hif, NULL, NULL, NULL, NULL,
428 0 : 0, 0, HFSC_ROOT_CLASS | q->qid);
429 0 : if (np == NULL)
430 0 : return (EINVAL);
431 : parent = np;
432 0 : } else if ((parent = hfsc_clh2cph(hif, q->parent_qid)) == NULL)
433 0 : return (EINVAL);
434 :
435 0 : if (hfsc_clh2cph(hif, q->qid) != NULL) {
436 0 : hfsc_class_destroy(hif, np);
437 0 : return (EBUSY);
438 : }
439 :
440 0 : rtsc.m1 = q->realtime.m1.absolute;
441 0 : rtsc.d = q->realtime.d;
442 0 : rtsc.m2 = q->realtime.m2.absolute;
443 0 : lssc.m1 = q->linkshare.m1.absolute;
444 0 : lssc.d = q->linkshare.d;
445 0 : lssc.m2 = q->linkshare.m2.absolute;
446 0 : ulsc.m1 = q->upperlimit.m1.absolute;
447 0 : ulsc.d = q->upperlimit.d;
448 0 : ulsc.m2 = q->upperlimit.m2.absolute;
449 :
450 0 : if ((cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
451 0 : parent, q->qlimit, q->flags, q->qid)) == NULL) {
452 0 : hfsc_class_destroy(hif, np);
453 0 : return (ENOMEM);
454 : }
455 :
456 : /* Attach a queue manager if specified */
457 0 : cl->cl_qops = pf_queue_manager(q);
458 : /* Realtime class cannot be used with an external queue manager */
459 0 : if (cl->cl_qops == NULL || cl->cl_rsc != NULL) {
460 0 : cl->cl_qops = pfq_hfsc_ops;
461 0 : cl->cl_qdata = &cl->cl_q;
462 0 : } else {
463 0 : cl->cl_qdata = cl->cl_qops->pfq_alloc(q->kif->pfik_ifp);
464 0 : if (cl->cl_qdata == NULL) {
465 0 : cl->cl_qops = NULL;
466 0 : hfsc_class_destroy(hif, cl);
467 0 : hfsc_class_destroy(hif, np);
468 0 : return (ENOMEM);
469 : }
470 0 : error = cl->cl_qops->pfq_addqueue(cl->cl_qdata, q);
471 0 : if (error) {
472 0 : cl->cl_qops->pfq_free(cl->cl_qdata);
473 0 : cl->cl_qops = NULL;
474 0 : hfsc_class_destroy(hif, cl);
475 0 : hfsc_class_destroy(hif, np);
476 0 : return (error);
477 : }
478 : }
479 :
480 0 : KASSERT(cl->cl_qops != NULL);
481 0 : KASSERT(cl->cl_qdata != NULL);
482 :
483 0 : return (0);
484 0 : }
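/*
 * for orientation, an assumed (not verified here) pf.conf mapping: a
 * rule such as
 *
 *	queue ssh parent std bandwidth 1M min 500K max 2M
 *
 * would arrive with q->linkshare.m2 = 1Mbps, q->realtime.m2 = 500Kbps
 * and q->upperlimit.m2 = 2Mbps, becoming the class's fair service
 * curve, real-time curve and upper-limit curve respectively.
 */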
485 :
486 : int
487 0 : hfsc_pf_qstats(struct pf_queuespec *q, void *ubuf, int *nbytes)
488 : {
489 0 : struct ifnet *ifp = q->kif->pfik_ifp;
490 : struct hfsc_if *hif;
491 : struct hfsc_class *cl;
492 0 : struct hfsc_class_stats stats;
493 : int error = 0;
494 :
495 0 : if (ifp == NULL)
496 0 : return (EBADF);
497 :
498 0 : if (*nbytes < sizeof(stats))
499 0 : return (EINVAL);
500 :
501 0 : hif = ifq_q_enter(&ifp->if_snd, ifq_hfsc_ops);
502 0 : if (hif == NULL)
503 0 : return (EBADF);
504 :
505 0 : if ((cl = hfsc_clh2cph(hif, q->qid)) == NULL) {
506 0 : ifq_q_leave(&ifp->if_snd, hif);
507 0 : return (EINVAL);
508 : }
509 :
510 0 : hfsc_getclstats(&stats, cl);
511 0 : ifq_q_leave(&ifp->if_snd, hif);
512 :
513 0 : if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
514 0 : return (error);
515 :
516 0 : *nbytes = sizeof(stats);
517 0 : return (0);
518 0 : }
519 :
520 : void
521 0 : hfsc_pf_free(void *arg)
522 : {
523 0 : hfsc_free(0, arg);
524 0 : }
525 :
526 : unsigned int
527 0 : hfsc_pf_qlength(void *arg)
528 : {
529 0 : struct hfsc_classq *cq = arg;
530 :
531 0 : return ml_len(&cq->q);
532 : }
533 :
534 : struct mbuf *
535 0 : hfsc_pf_enqueue(void *arg, struct mbuf *m)
536 : {
537 0 : struct hfsc_classq *cq = arg;
538 :
539 0 : if (ml_len(&cq->q) >= cq->qlimit)
540 0 : return (m);
541 :
542 0 : ml_enqueue(&cq->q, m);
543 0 : m->m_pkthdr.pf.prio = IFQ_MAXPRIO;
544 0 : return (NULL);
545 0 : }
546 :
547 : struct mbuf *
548 0 : hfsc_pf_deq_begin(void *arg, void **cookiep, struct mbuf_list *free_ml)
549 : {
550 0 : struct hfsc_classq *cq = arg;
551 :
552 0 : return MBUF_LIST_FIRST(&cq->q);
553 : }
554 :
555 : void
556 0 : hfsc_pf_deq_commit(void *arg, struct mbuf *m, void *cookie)
557 : {
558 0 : struct hfsc_classq *cq = arg;
559 :
560 0 : ml_dequeue(&cq->q);
561 0 : }
562 :
563 : void
564 0 : hfsc_pf_purge(void *arg, struct mbuf_list *ml)
565 : {
566 0 : struct hfsc_classq *cq = arg;
567 :
568 0 : ml_enlist(ml, &cq->q);
569 0 : }
570 :
571 : unsigned int
572 0 : hfsc_idx(unsigned int nqueues, const struct mbuf *m)
573 : {
574 : /*
575 : * hfsc can only function on a single ifq and the stack understands
576 : * this. when the first ifq on an interface is switched to hfsc,
577 : * this gets used to map all mbufs to the first and only ifq that
578 : * is set up for hfsc.
579 : */
580 0 : return (0);
581 : }
582 :
583 : void *
584 0 : hfsc_alloc(unsigned int idx, void *q)
585 : {
586 0 : struct hfsc_if *hif = q;
587 :
588 0 : KASSERT(idx == 0); /* when hfsc is enabled we only use the first ifq */
589 0 : KASSERT(hif != NULL);
590 0 : return (hif);
591 : }
592 :
593 : void
594 0 : hfsc_free(unsigned int idx, void *q)
595 : {
596 0 : struct hfsc_if *hif = q;
597 : struct hfsc_class *cl;
598 : int i, restart;
599 :
600 0 : KERNEL_ASSERT_LOCKED();
601 0 : KASSERT(idx == 0); /* when hfsc is enabled we only use the first ifq */
602 :
603 0 : timeout_del(&hif->hif_defer);
604 :
605 0 : do {
606 : restart = 0;
607 0 : for (i = 0; i < hif->hif_allocated; i++) {
608 0 : cl = hif->hif_class_tbl[i];
609 0 : if (hfsc_class_destroy(hif, cl) == EBUSY)
610 0 : restart++;
611 : }
612 0 : } while (restart > 0);
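	/*
	 * hfsc_class_destroy() fails with EBUSY for a class that still
	 * has children, so each pass destroys the current leaves; the
	 * sweep repeats until a full pass frees every remaining class.
	 */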
613 :
614 0 : free(hif->hif_class_tbl, M_DEVBUF, hif->hif_allocated * sizeof(void *));
615 0 : free(hif, M_DEVBUF, sizeof(*hif));
616 0 : }
617 :
618 : void
619 0 : hfsc_purge(struct ifqueue *ifq, struct mbuf_list *ml)
620 : {
621 0 : struct hfsc_if *hif = ifq->ifq_q;
622 : struct hfsc_class *cl;
623 :
624 0 : for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
625 0 : hfsc_cl_purge(hif, cl, ml);
626 0 : }
627 :
628 : struct hfsc_class *
629 0 : hfsc_class_create(struct hfsc_if *hif, struct hfsc_sc *rsc,
630 : struct hfsc_sc *fsc, struct hfsc_sc *usc, struct hfsc_class *parent,
631 : int qlimit, int flags, int qid)
632 : {
633 : struct hfsc_class *cl, *p;
634 : int i, s;
635 :
636 0 : if (qlimit == 0)
637 0 : qlimit = HFSC_DEFAULT_QLIMIT;
638 :
639 0 : if (hif->hif_classes >= hif->hif_allocated) {
640 0 : u_int newslots = hfsc_more_slots(hif->hif_allocated);
641 :
642 0 : if (newslots == hif->hif_allocated)
643 0 : return (NULL);
644 0 : hfsc_grow_class_tbl(hif, newslots);
645 0 : }
646 :
647 0 : cl = pool_get(&hfsc_class_pl, PR_WAITOK | PR_ZERO);
648 0 : TAILQ_INIT(&cl->cl_actc);
649 :
650 0 : ml_init(&cl->cl_q.q);
651 0 : cl->cl_q.qlimit = qlimit;
652 0 : cl->cl_flags = flags;
653 :
654 0 : if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
655 0 : cl->cl_rsc = pool_get(&hfsc_internal_sc_pl, PR_WAITOK);
656 0 : hfsc_sc2isc(rsc, cl->cl_rsc);
657 0 : hfsc_rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
658 0 : hfsc_rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
659 0 : }
660 0 : if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
661 0 : cl->cl_fsc = pool_get(&hfsc_internal_sc_pl, PR_WAITOK);
662 0 : hfsc_sc2isc(fsc, cl->cl_fsc);
663 0 : hfsc_rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
664 0 : }
665 0 : if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
666 0 : cl->cl_usc = pool_get(&hfsc_internal_sc_pl, PR_WAITOK);
667 0 : hfsc_sc2isc(usc, cl->cl_usc);
668 0 : hfsc_rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
669 0 : }
670 :
671 0 : cl->cl_id = hif->hif_classid++;
672 0 : cl->cl_handle = qid;
673 0 : cl->cl_parent = parent;
674 :
675 0 : s = splnet();
676 0 : hif->hif_classes++;
677 :
678 : /*
679 : * find a free slot in the class table. if the slot matching
680 : * the lower bits of qid is free, use this slot. otherwise,
681 : * use the first free slot.
682 : */
683 0 : i = qid % hif->hif_allocated;
684 0 : if (hif->hif_class_tbl[i] == NULL)
685 0 : hif->hif_class_tbl[i] = cl;
686 : else {
687 0 : for (i = 0; i < hif->hif_allocated; i++)
688 0 : if (hif->hif_class_tbl[i] == NULL) {
689 0 : hif->hif_class_tbl[i] = cl;
690 0 : break;
691 : }
692 0 : if (i == hif->hif_allocated) {
693 0 : splx(s);
694 : goto err_ret;
695 : }
696 : }
697 :
698 0 : if (flags & HFSC_DEFAULTCLASS)
699 0 : hif->hif_defaultclass = cl;
700 :
701 0 : if (parent == NULL)
702 0 : hif->hif_rootclass = cl;
703 : else {
704 : /* add this class to the children list of the parent */
705 0 : if ((p = parent->cl_children) == NULL)
706 0 : parent->cl_children = cl;
707 : else {
708 0 : while (p->cl_siblings != NULL)
709 : p = p->cl_siblings;
710 0 : p->cl_siblings = cl;
711 : }
712 : }
713 0 : splx(s);
714 :
715 0 : return (cl);
716 :
717 : err_ret:
718 0 : if (cl->cl_fsc != NULL)
719 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_fsc);
720 0 : if (cl->cl_rsc != NULL)
721 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_rsc);
722 0 : if (cl->cl_usc != NULL)
723 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_usc);
724 0 : pool_put(&hfsc_class_pl, cl);
725 0 : return (NULL);
726 0 : }
727 :
728 : int
729 0 : hfsc_class_destroy(struct hfsc_if *hif, struct hfsc_class *cl)
730 : {
731 : int i, s;
732 :
733 0 : if (cl == NULL)
734 0 : return (0);
735 :
736 0 : if (cl->cl_children != NULL)
737 0 : return (EBUSY);
738 :
739 0 : s = splnet();
740 0 : KASSERT(hfsc_class_qlength(cl) == 0);
741 :
742 0 : if (cl->cl_parent != NULL) {
743 0 : struct hfsc_class *p = cl->cl_parent->cl_children;
744 :
745 0 : if (p == cl)
746 0 : cl->cl_parent->cl_children = cl->cl_siblings;
747 0 : else do {
748 0 : if (p->cl_siblings == cl) {
749 0 : p->cl_siblings = cl->cl_siblings;
750 0 : break;
751 : }
752 0 : } while ((p = p->cl_siblings) != NULL);
753 0 : }
754 :
755 0 : for (i = 0; i < hif->hif_allocated; i++)
756 0 : if (hif->hif_class_tbl[i] == cl) {
757 0 : hif->hif_class_tbl[i] = NULL;
758 0 : break;
759 : }
760 :
761 0 : hif->hif_classes--;
762 0 : splx(s);
763 :
764 0 : KASSERT(TAILQ_EMPTY(&cl->cl_actc));
765 :
766 0 : if (cl == hif->hif_rootclass)
767 0 : hif->hif_rootclass = NULL;
768 0 : if (cl == hif->hif_defaultclass)
769 0 : hif->hif_defaultclass = NULL;
770 :
771 : /* Free external queue manager resources */
772 0 : if (cl->cl_qops && cl->cl_qops != pfq_hfsc_ops)
773 0 : cl->cl_qops->pfq_free(cl->cl_qdata);
774 :
775 0 : if (cl->cl_usc != NULL)
776 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_usc);
777 0 : if (cl->cl_fsc != NULL)
778 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_fsc);
779 0 : if (cl->cl_rsc != NULL)
780 0 : pool_put(&hfsc_internal_sc_pl, cl->cl_rsc);
781 0 : pool_put(&hfsc_class_pl, cl);
782 :
783 0 : return (0);
784 0 : }
785 :
786 : /*
787 : * hfsc_nextclass returns the next class in the tree.
788 : * usage:
789 : * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
790 : * do_something;
791 : */
792 : struct hfsc_class *
793 0 : hfsc_nextclass(struct hfsc_class *cl)
794 : {
795 0 : if (cl->cl_children != NULL)
796 0 : cl = cl->cl_children;
797 0 : else if (cl->cl_siblings != NULL)
798 0 : cl = cl->cl_siblings;
799 : else {
800 0 : while ((cl = cl->cl_parent) != NULL)
801 0 : if (cl->cl_siblings) {
802 : cl = cl->cl_siblings;
803 0 : break;
804 : }
805 : }
806 :
807 0 : return (cl);
808 : }
809 :
810 : struct mbuf *
811 0 : hfsc_enq(struct ifqueue *ifq, struct mbuf *m)
812 : {
813 0 : struct hfsc_if *hif = ifq->ifq_q;
814 : struct hfsc_class *cl;
815 : struct mbuf *dm;
816 :
817 0 : if ((cl = hfsc_clh2cph(hif, m->m_pkthdr.pf.qid)) == NULL ||
818 0 : cl->cl_children != NULL) {
819 0 : cl = hif->hif_defaultclass;
820 0 : if (cl == NULL)
821 0 : return (m);
822 : }
823 :
824 0 : dm = hfsc_class_enqueue(cl, m);
825 :
826 : /* successfully queued. */
827 0 : if (dm != m && hfsc_class_qlength(cl) == 1) {
828 0 : hfsc_set_active(hif, cl, m->m_pkthdr.len);
829 0 : if (!timeout_pending(&hif->hif_defer))
830 0 : timeout_add(&hif->hif_defer, 1);
831 : }
832 :
833 : /* drop occurred. */
834 0 : if (dm != NULL)
835 0 : PKTCNTR_INC(&cl->cl_stats.drop_cnt, dm->m_pkthdr.len);
836 :
837 0 : return (dm);
838 0 : }
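/*
 * note on the contract assumed here: hfsc_enq() returns NULL when the
 * mbuf was queued and the mbuf to be freed when it was rejected, so
 * the caller only has to free (and account) a packet when the return
 * value is non-NULL.
 */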
839 :
840 : struct mbuf *
841 0 : hfsc_deq_begin(struct ifqueue *ifq, void **cookiep)
842 : {
843 0 : struct mbuf_list free_ml = MBUF_LIST_INITIALIZER();
844 0 : struct hfsc_if *hif = ifq->ifq_q;
845 : struct hfsc_class *cl, *tcl;
846 : struct mbuf *m;
847 : u_int64_t cur_time;
848 :
849 0 : cur_time = hfsc_microuptime();
850 :
851 : /*
852 : * if there are eligible classes, use real-time criteria.
853 : * find the class with the minimum deadline among
854 : * the eligible classes.
855 : */
856 0 : cl = hfsc_ellist_get_mindl(hif, cur_time);
857 0 : if (cl == NULL) {
858 : /*
859 : * use link-sharing criteria
860 : * get the class with the minimum vt in the hierarchy
861 : */
862 : cl = NULL;
863 0 : tcl = hif->hif_rootclass;
864 :
865 0 : while (tcl != NULL && tcl->cl_children != NULL) {
866 0 : tcl = hfsc_actlist_firstfit(tcl, cur_time);
867 0 : if (tcl == NULL)
868 0 : continue;
869 :
870 : /*
871 : * update parent's cl_cvtmin.
872 : * don't update if the new vt is smaller.
873 : */
874 0 : if (tcl->cl_parent->cl_cvtmin < tcl->cl_vt)
875 0 : tcl->cl_parent->cl_cvtmin = tcl->cl_vt;
876 :
877 : cl = tcl;
878 : }
879 : /* XXX HRTIMER plan hfsc_deferred precisely here. */
880 0 : if (cl == NULL)
881 0 : return (NULL);
882 : }
883 :
884 0 : m = hfsc_class_deq_begin(cl, &free_ml);
885 0 : ifq_mfreeml(ifq, &free_ml);
886 0 : if (m == NULL) {
887 0 : hfsc_update_sc(hif, cl, 0);
888 0 : return (NULL);
889 : }
890 :
891 0 : hif->hif_microtime = cur_time;
892 0 : *cookiep = cl;
893 0 : return (m);
894 0 : }
895 :
896 : void
897 0 : hfsc_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie)
898 : {
899 0 : struct hfsc_if *hif = ifq->ifq_q;
900 0 : struct hfsc_class *cl = cookie;
901 :
902 0 : hfsc_class_deq_commit(cl, m);
903 0 : hfsc_update_sc(hif, cl, m->m_pkthdr.len);
904 :
905 0 : PKTCNTR_INC(&cl->cl_stats.xmit_cnt, m->m_pkthdr.len);
906 0 : }
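/*
 * the begin/commit split follows the ifq_ops contract as used here:
 * hfsc_deq_begin() only peeks at the head packet of the chosen class
 * and records the class in *cookiep; the packet is removed and the
 * service curves are charged in hfsc_deq_commit() once the caller has
 * committed to transmitting it.
 */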
907 :
908 : void
909 0 : hfsc_update_sc(struct hfsc_if *hif, struct hfsc_class *cl, int len)
910 : {
911 : int next_len, realtime = 0;
912 0 : u_int64_t cur_time = hif->hif_microtime;
913 :
914 : /* check if the class was scheduled by real-time criteria */
915 0 : if (cl->cl_rsc != NULL)
916 0 : realtime = (cl->cl_e <= cur_time);
917 :
918 0 : hfsc_update_vf(cl, len, cur_time);
919 0 : if (realtime)
920 0 : cl->cl_cumul += len;
921 :
922 0 : if (hfsc_class_qlength(cl) > 0) {
923 : /*
924 : * The real-time criteria need the length of the next packet
925 : * to compute its deadline, which is why a real-time class
926 : * cannot use an external queue manager.
927 : */
928 0 : if (cl->cl_rsc != NULL) {
929 : struct mbuf *m0;
930 :
931 : /* update ed */
932 0 : KASSERT(cl->cl_qops == pfq_hfsc_ops);
933 0 : m0 = MBUF_LIST_FIRST(&cl->cl_q.q);
934 0 : next_len = m0->m_pkthdr.len;
935 :
936 0 : if (realtime)
937 0 : hfsc_update_ed(hif, cl, next_len);
938 : else
939 0 : hfsc_update_d(cl, next_len);
940 0 : }
941 : } else {
942 : /* the class becomes passive */
943 0 : hfsc_set_passive(hif, cl);
944 : }
945 0 : }
946 :
947 : void
948 0 : hfsc_deferred(void *arg)
949 : {
950 0 : struct ifnet *ifp = arg;
951 0 : struct ifqueue *ifq = &ifp->if_snd;
952 : struct hfsc_if *hif;
953 :
954 0 : if (!HFSC_ENABLED(ifq))
955 0 : return;
956 :
957 0 : if (!ifq_empty(ifq))
958 0 : ifq_start(ifq);
959 :
960 0 : hif = ifq_q_enter(&ifp->if_snd, ifq_hfsc_ops);
961 0 : if (hif == NULL)
962 0 : return;
963 : /* XXX HRTIMER nearest virtual/fit time is likely less than 1/HZ. */
964 0 : timeout_add(&hif->hif_defer, 1);
965 0 : ifq_q_leave(&ifp->if_snd, hif);
966 0 : }
967 :
968 : void
969 0 : hfsc_cl_purge(struct hfsc_if *hif, struct hfsc_class *cl, struct mbuf_list *ml)
970 : {
971 0 : struct mbuf_list ml2 = MBUF_LIST_INITIALIZER();
972 :
973 0 : hfsc_class_purge(cl, &ml2);
974 0 : if (ml_empty(&ml2))
975 0 : return;
976 :
977 0 : ml_enlist(ml, &ml2);
978 :
979 0 : hfsc_update_vf(cl, 0, 0); /* remove cl from the actlist */
980 0 : hfsc_set_passive(hif, cl);
981 0 : }
982 :
983 : void
984 0 : hfsc_set_active(struct hfsc_if *hif, struct hfsc_class *cl, int len)
985 : {
986 0 : if (cl->cl_rsc != NULL)
987 0 : hfsc_init_ed(hif, cl, len);
988 0 : if (cl->cl_fsc != NULL)
989 0 : hfsc_init_vf(cl, len);
990 :
991 0 : cl->cl_stats.period++;
992 0 : }
993 :
994 : void
995 0 : hfsc_set_passive(struct hfsc_if *hif, struct hfsc_class *cl)
996 : {
997 0 : if (cl->cl_rsc != NULL)
998 0 : hfsc_ellist_remove(hif, cl);
999 :
1000 : /*
1001 : * the actlist is handled in hfsc_update_vf(), so hfsc_update_vf(cl, 0, 0)
1002 : * must be called explicitly to remove a class from the actlist
1003 : */
1004 0 : }
1005 :
1006 : void
1007 0 : hfsc_init_ed(struct hfsc_if *hif, struct hfsc_class *cl, int next_len)
1008 : {
1009 : u_int64_t cur_time;
1010 :
1011 0 : cur_time = hfsc_microuptime();
1012 :
1013 : /* update the deadline curve */
1014 0 : hfsc_rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
1015 :
1016 : /*
1017 : * update the eligible curve.
1018 : * for concave, it is equal to the deadline curve.
1019 : * for convex, it is a linear curve with slope m2.
1020 : */
1021 0 : cl->cl_eligible = cl->cl_deadline;
1022 0 : if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1023 0 : cl->cl_eligible.dx = 0;
1024 0 : cl->cl_eligible.dy = 0;
1025 0 : }
1026 :
1027 : /* compute e and d */
1028 0 : cl->cl_e = hfsc_rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
1029 0 : cl->cl_d = hfsc_rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
1030 :
1031 0 : hfsc_ellist_insert(hif, cl);
1032 0 : }
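/*
 * worked example for the concave/convex distinction above: a real-time
 * curve with m1 = 2Mbps, d = 50ms, m2 = 1Mbps is concave (sm1 > sm2),
 * so the eligible curve keeps the steep first segment and equals the
 * deadline curve.  with the slopes swapped the curve is convex, dx and
 * dy are zeroed, and the eligible curve degenerates to the straight
 * line of slope m2 through the start point.
 */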
1033 :
1034 : void
1035 0 : hfsc_update_ed(struct hfsc_if *hif, struct hfsc_class *cl, int next_len)
1036 : {
1037 0 : cl->cl_e = hfsc_rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
1038 0 : cl->cl_d = hfsc_rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
1039 :
1040 0 : hfsc_ellist_update(hif, cl);
1041 0 : }
1042 :
1043 : void
1044 0 : hfsc_update_d(struct hfsc_class *cl, int next_len)
1045 : {
1046 0 : cl->cl_d = hfsc_rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
1047 0 : }
1048 :
1049 : void
1050 0 : hfsc_init_vf(struct hfsc_class *cl, int len)
1051 : {
1052 : struct hfsc_class *max_cl, *p;
1053 : u_int64_t vt, f, cur_time;
1054 : int go_active;
1055 :
1056 : cur_time = 0;
1057 : go_active = 1;
1058 0 : for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
1059 0 : if (go_active && cl->cl_nactive++ == 0)
1060 0 : go_active = 1;
1061 : else
1062 : go_active = 0;
1063 :
1064 0 : if (go_active) {
1065 0 : max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc,
1066 : hfsc_active);
1067 0 : if (max_cl != NULL) {
1068 : /*
1069 : * set vt to the average of the min and max
1070 : * classes. if the parent's period didn't
1071 : * change, don't decrease vt of the class.
1072 : */
1073 0 : vt = max_cl->cl_vt;
1074 0 : if (cl->cl_parent->cl_cvtmin != 0)
1075 0 : vt = (cl->cl_parent->cl_cvtmin + vt)/2;
1076 :
1077 0 : if (cl->cl_parent->cl_vtperiod !=
1078 0 : cl->cl_parentperiod || vt > cl->cl_vt)
1079 0 : cl->cl_vt = vt;
1080 : } else {
1081 : /*
1082 : * first child for a new parent backlog period.
1083 : * add parent's cvtmax to vtoff of children
1084 : * to make a new vt (vtoff + vt) larger than
1085 : * the vt in the last period for all children.
1086 : */
1087 0 : vt = cl->cl_parent->cl_cvtmax;
1088 0 : for (p = cl->cl_parent->cl_children; p != NULL;
1089 0 : p = p->cl_siblings)
1090 0 : p->cl_vtoff += vt;
1091 0 : cl->cl_vt = 0;
1092 0 : cl->cl_parent->cl_cvtmax = 0;
1093 0 : cl->cl_parent->cl_cvtmin = 0;
1094 : }
1095 0 : cl->cl_initvt = cl->cl_vt;
1096 :
1097 : /* update the virtual curve */
1098 0 : vt = cl->cl_vt + cl->cl_vtoff;
1099 0 : hfsc_rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt,
1100 0 : cl->cl_total);
1101 0 : if (cl->cl_virtual.x == vt) {
1102 0 : cl->cl_virtual.x -= cl->cl_vtoff;
1103 0 : cl->cl_vtoff = 0;
1104 0 : }
1105 0 : cl->cl_vtadj = 0;
1106 :
1107 0 : cl->cl_vtperiod++; /* increment vt period */
1108 0 : cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
1109 0 : if (cl->cl_parent->cl_nactive == 0)
1110 0 : cl->cl_parentperiod++;
1111 0 : cl->cl_f = 0;
1112 :
1113 0 : hfsc_actlist_insert(cl);
1114 :
1115 0 : if (cl->cl_usc != NULL) {
1116 : /* class has upper limit curve */
1117 0 : if (cur_time == 0)
1118 0 : cur_time = hfsc_microuptime();
1119 :
1120 : /* update the ulimit curve */
1121 0 : hfsc_rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
1122 0 : cl->cl_total);
1123 : /* compute myf */
1124 0 : cl->cl_myf = hfsc_rtsc_y2x(&cl->cl_ulimit,
1125 0 : cl->cl_total);
1126 0 : cl->cl_myfadj = 0;
1127 0 : }
1128 : }
1129 :
1130 0 : if (cl->cl_myf > cl->cl_cfmin)
1131 0 : f = cl->cl_myf;
1132 : else
1133 : f = cl->cl_cfmin;
1134 0 : if (f != cl->cl_f) {
1135 0 : cl->cl_f = f;
1136 0 : hfsc_update_cfmin(cl->cl_parent);
1137 0 : }
1138 : }
1139 0 : }
1140 :
1141 : void
1142 0 : hfsc_update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
1143 : {
1144 : u_int64_t f, myf_bound, delta;
1145 : int go_passive = 0;
1146 :
1147 0 : if (hfsc_class_qlength(cl) == 0)
1148 0 : go_passive = 1;
1149 :
1150 0 : for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
1151 0 : cl->cl_total += len;
1152 :
1153 0 : if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
1154 : continue;
1155 :
1156 0 : if (go_passive && --cl->cl_nactive == 0)
1157 0 : go_passive = 1;
1158 : else
1159 : go_passive = 0;
1160 :
1161 0 : if (go_passive) {
1162 : /* no more active child, going passive */
1163 :
1164 : /* update cvtmax of the parent class */
1165 0 : if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
1166 0 : cl->cl_parent->cl_cvtmax = cl->cl_vt;
1167 :
1168 : /* remove this class from the vt list */
1169 0 : hfsc_actlist_remove(cl);
1170 :
1171 0 : hfsc_update_cfmin(cl->cl_parent);
1172 :
1173 0 : continue;
1174 : }
1175 :
1176 : /*
1177 : * update vt and f
1178 : */
1179 0 : cl->cl_vt = hfsc_rtsc_y2x(&cl->cl_virtual, cl->cl_total)
1180 0 : - cl->cl_vtoff + cl->cl_vtadj;
1181 :
1182 : /*
1183 : * if vt of the class is smaller than cvtmin,
1184 : * the class was skipped in the past due to non-fit.
1185 : * if so, we need to adjust vtadj.
1186 : */
1187 0 : if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
1188 0 : cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
1189 0 : cl->cl_vt = cl->cl_parent->cl_cvtmin;
1190 0 : }
1191 :
1192 : /* update the vt list */
1193 0 : hfsc_actlist_update(cl);
1194 :
1195 0 : if (cl->cl_usc != NULL) {
1196 0 : cl->cl_myf = cl->cl_myfadj +
1197 0 : hfsc_rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
1198 :
1199 : /*
1200 : * if myf lags behind by more than one clock tick
1201 : * from the current time, adjust myfadj to prevent
1202 : * a rate-limited class from going greedy.
1203 : * in a steady state under rate-limiting, myf
1204 : * fluctuates within one clock tick.
1205 : */
1206 0 : myf_bound = cur_time - HFSC_CLK_PER_TICK;
1207 0 : if (cl->cl_myf < myf_bound) {
1208 0 : delta = cur_time - cl->cl_myf;
1209 0 : cl->cl_myfadj += delta;
1210 0 : cl->cl_myf += delta;
1211 0 : }
1212 : }
1213 :
1214 : /* cl_f is max(cl_myf, cl_cfmin) */
1215 0 : if (cl->cl_myf > cl->cl_cfmin)
1216 0 : f = cl->cl_myf;
1217 : else
1218 : f = cl->cl_cfmin;
1219 0 : if (f != cl->cl_f) {
1220 0 : cl->cl_f = f;
1221 0 : hfsc_update_cfmin(cl->cl_parent);
1222 0 : }
1223 : }
1224 0 : }
1225 :
1226 : void
1227 0 : hfsc_update_cfmin(struct hfsc_class *cl)
1228 : {
1229 : struct hfsc_class *p;
1230 : u_int64_t cfmin;
1231 :
1232 0 : if (TAILQ_EMPTY(&cl->cl_actc)) {
1233 0 : cl->cl_cfmin = 0;
1234 0 : return;
1235 : }
1236 : cfmin = HFSC_HT_INFINITY;
1237 0 : TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
1238 0 : if (p->cl_f == 0) {
1239 0 : cl->cl_cfmin = 0;
1240 0 : return;
1241 : }
1242 0 : if (p->cl_f < cfmin)
1243 0 : cfmin = p->cl_f;
1244 : }
1245 0 : cl->cl_cfmin = cfmin;
1246 0 : }
1247 :
1248 : /*
1249 : * the eligible list holds backlogged classes sorted by their eligible times.
1250 : * there is one eligible list per interface.
1251 : */
1252 : void
1253 0 : hfsc_ellist_insert(struct hfsc_if *hif, struct hfsc_class *cl)
1254 : {
1255 : struct hfsc_class *p;
1256 :
1257 : /* check the last entry first */
1258 0 : if ((p = TAILQ_LAST(&hif->hif_eligible, hfsc_eligible)) == NULL ||
1259 0 : p->cl_e <= cl->cl_e) {
1260 0 : TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
1261 0 : return;
1262 : }
1263 :
1264 0 : TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
1265 0 : if (cl->cl_e < p->cl_e) {
1266 0 : TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1267 0 : return;
1268 : }
1269 : }
1270 0 : }
1271 :
1272 : void
1273 0 : hfsc_ellist_remove(struct hfsc_if *hif, struct hfsc_class *cl)
1274 : {
1275 0 : TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
1276 0 : }
1277 :
1278 : void
1279 0 : hfsc_ellist_update(struct hfsc_if *hif, struct hfsc_class *cl)
1280 : {
1281 : struct hfsc_class *p, *last;
1282 :
1283 : /*
1284 : * the eligible time of a class increases monotonically.
1285 : * if the next entry has a larger eligible time, nothing to do.
1286 : */
1287 0 : p = TAILQ_NEXT(cl, cl_ellist);
1288 0 : if (p == NULL || cl->cl_e <= p->cl_e)
1289 0 : return;
1290 :
1291 : /* check the last entry */
1292 0 : last = TAILQ_LAST(&hif->hif_eligible, hfsc_eligible);
1293 0 : if (last->cl_e <= cl->cl_e) {
1294 0 : TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
1295 0 : TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
1296 0 : return;
1297 : }
1298 :
1299 : /*
1300 : * the new position must be between the next entry
1301 : * and the last entry
1302 : */
1303 0 : while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
1304 0 : if (cl->cl_e < p->cl_e) {
1305 0 : TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
1306 0 : TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1307 0 : return;
1308 : }
1309 : }
1310 0 : }
1311 :
1312 : /* find the class with the minimum deadline among the eligible classes */
1313 : struct hfsc_class *
1314 0 : hfsc_ellist_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
1315 : {
1316 : struct hfsc_class *p, *cl = NULL;
1317 :
1318 0 : TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
1319 0 : if (p->cl_e > cur_time)
1320 : break;
1321 0 : if (cl == NULL || p->cl_d < cl->cl_d)
1322 0 : cl = p;
1323 : }
1324 0 : return (cl);
1325 : }
1326 :
1327 : /*
1328 : * the active children list holds backlogged child classes sorted
1329 : * by their virtual time.
1330 : * each intermediate class has one active children list.
1331 : */
1332 : void
1333 0 : hfsc_actlist_insert(struct hfsc_class *cl)
1334 : {
1335 : struct hfsc_class *p;
1336 :
1337 : /* check the last entry first */
1338 0 : if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, hfsc_active)) == NULL
1339 0 : || p->cl_vt <= cl->cl_vt) {
1340 0 : TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
1341 0 : return;
1342 : }
1343 :
1344 0 : TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
1345 0 : if (cl->cl_vt < p->cl_vt) {
1346 0 : TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1347 0 : return;
1348 : }
1349 : }
1350 0 : }
1351 :
1352 : void
1353 0 : hfsc_actlist_remove(struct hfsc_class *cl)
1354 : {
1355 0 : TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
1356 0 : }
1357 :
1358 : void
1359 0 : hfsc_actlist_update(struct hfsc_class *cl)
1360 : {
1361 : struct hfsc_class *p, *last;
1362 :
1363 : /*
1364 : * the virtual time of a class increases monotonically during its
1365 : * backlogged period.
1366 : * if the next entry has a larger virtual time, nothing to do.
1367 : */
1368 0 : p = TAILQ_NEXT(cl, cl_actlist);
1369 0 : if (p == NULL || cl->cl_vt < p->cl_vt)
1370 0 : return;
1371 :
1372 : /* check the last entry */
1373 0 : last = TAILQ_LAST(&cl->cl_parent->cl_actc, hfsc_active);
1374 0 : if (last->cl_vt <= cl->cl_vt) {
1375 0 : TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
1376 0 : TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
1377 0 : return;
1378 : }
1379 :
1380 : /*
1381 : * the new position must be between the next entry
1382 : * and the last entry
1383 : */
1384 0 : while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
1385 0 : if (cl->cl_vt < p->cl_vt) {
1386 0 : TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
1387 0 : TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1388 0 : return;
1389 : }
1390 : }
1391 0 : }
1392 :
1393 : struct hfsc_class *
1394 0 : hfsc_actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
1395 : {
1396 : struct hfsc_class *p;
1397 :
1398 0 : TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist)
1399 0 : if (p->cl_f <= cur_time)
1400 0 : return (p);
1401 :
1402 0 : return (NULL);
1403 0 : }
1404 :
1405 : /*
1406 : * service curve support functions
1407 : *
1408 : * external service curve parameters
1409 : * m: bits/sec
1410 : * d: msec
1411 : * internal service curve parameters
1412 : * sm: (bytes/tsc_interval) << SM_SHIFT
1413 : * ism: (tsc_count/byte) << ISM_SHIFT
1414 : * dx: tsc_count
1415 : *
1416 : * sm and ism are scaled by SM_SHIFT and ISM_SHIFT to keep effective digits.
1417 : * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
1418 : * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
1419 : * digits in decimal using the following table.
1420 : *
1421 : * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
1422 : * ----------+-------------------------------------------------------
1423 : * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6
1424 : * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6
1425 : * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6
1426 : *
1427 : * nsec/byte 80000 8000 800 80 8
1428 : * ism(500MHz) 40000 4000 400 40 4
1429 : * ism(200MHz) 16000 1600 160 16 1.6
1430 : */
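/*
 * note: in this file the "machine clock" is not a raw TSC but the
 * scaled microuptime of hfsc_microuptime(), which runs at HFSC_FREQ
 * (a 256MHz equivalent) on every architecture; the CPU frequencies in
 * the table above are kept for historical context.
 */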
1431 : #define SM_SHIFT 24
1432 : #define ISM_SHIFT 10
1433 :
1434 : #define SM_MASK ((1LL << SM_SHIFT) - 1)
1435 : #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
1436 :
1437 : static __inline u_int64_t
1438 0 : seg_x2y(u_int64_t x, u_int64_t sm)
1439 : {
1440 : u_int64_t y;
1441 :
1442 : /*
1443 : * compute
1444 : * y = x * sm >> SM_SHIFT
1445 : * but divide it for the upper and lower bits to avoid overflow
1446 : */
1447 0 : y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
1448 0 : return (y);
1449 : }
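/*
 * why the split above is needed (illustrative magnitudes): x can reach
 * the order of 2^53 (about a year in 1/256us units) and sm carries
 * SM_SHIFT = 24 fraction bits, so a plain x * sm would overflow 64 bits
 * long before being shifted back down.  splitting x into its high and
 * low SM_SHIFT-bit halves keeps each partial product in range while
 * computing the same x * sm >> SM_SHIFT.
 */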
1450 :
1451 : static __inline u_int64_t
1452 0 : seg_y2x(u_int64_t y, u_int64_t ism)
1453 : {
1454 : u_int64_t x;
1455 :
1456 0 : if (y == 0)
1457 0 : x = 0;
1458 0 : else if (ism == HFSC_HT_INFINITY)
1459 0 : x = HFSC_HT_INFINITY;
1460 : else {
1461 0 : x = (y >> ISM_SHIFT) * ism
1462 0 : + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1463 : }
1464 0 : return (x);
1465 : }
1466 :
1467 : static __inline u_int64_t
1468 0 : m2sm(u_int m)
1469 : {
1470 : u_int64_t sm;
1471 :
1472 0 : sm = ((u_int64_t)m << SM_SHIFT) / 8 / HFSC_FREQ;
1473 0 : return (sm);
1474 : }
1475 :
1476 : static __inline u_int64_t
1477 0 : m2ism(u_int m)
1478 : {
1479 : u_int64_t ism;
1480 :
1481 0 : if (m == 0)
1482 0 : ism = HFSC_HT_INFINITY;
1483 : else
1484 0 : ism = ((u_int64_t)HFSC_FREQ << ISM_SHIFT) * 8 / m;
1485 0 : return (ism);
1486 : }
1487 :
1488 : static __inline u_int64_t
1489 0 : d2dx(u_int d)
1490 : {
1491 : u_int64_t dx;
1492 :
1493 0 : dx = ((u_int64_t)d * HFSC_FREQ) / 1000;
1494 0 : return (dx);
1495 : }
1496 :
1497 : static __inline u_int
1498 0 : sm2m(u_int64_t sm)
1499 : {
1500 : u_int64_t m;
1501 :
1502 0 : m = (sm * 8 * HFSC_FREQ) >> SM_SHIFT;
1503 0 : return ((u_int)m);
1504 : }
1505 :
1506 : static __inline u_int
1507 0 : dx2d(u_int64_t dx)
1508 : {
1509 : u_int64_t d;
1510 :
1511 0 : d = dx * 1000 / HFSC_FREQ;
1512 0 : return ((u_int)d);
1513 : }
1514 :
1515 : void
1516 0 : hfsc_sc2isc(struct hfsc_sc *sc, struct hfsc_internal_sc *isc)
1517 : {
1518 0 : isc->sm1 = m2sm(sc->m1);
1519 0 : isc->ism1 = m2ism(sc->m1);
1520 0 : isc->dx = d2dx(sc->d);
1521 0 : isc->dy = seg_x2y(isc->dx, isc->sm1);
1522 0 : isc->sm2 = m2sm(sc->m2);
1523 0 : isc->ism2 = m2ism(sc->m2);
1524 0 : }
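/*
 * a worked conversion (illustrative numbers): for sc = { m1 = 2Mbps,
 * d = 50ms, m2 = 1Mbps } and HFSC_FREQ = 256,000,000:
 *
 *	sm1  = (2000000 << 24) / 8 / HFSC_FREQ             = 16384
 *	ism1 = ((u_int64_t)HFSC_FREQ << 10) * 8 / 2000000  = 1048576
 *	dx   = 50 * HFSC_FREQ / 1000                       = 12800000
 *	dy   = seg_x2y(dx, sm1) = (12800000 * 16384) >> 24 = 12500
 *
 * i.e. the first segment spans 50ms on the x-axis and 12500 bytes
 * (250,000 bytes/s for 50ms) on the y-axis.
 */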
1525 :
1526 : /*
1527 : * initialize the runtime service curve with the given internal
1528 : * service curve starting at (x, y).
1529 : */
1530 : void
1531 0 : hfsc_rtsc_init(struct hfsc_runtime_sc *rtsc, struct hfsc_internal_sc * isc,
1532 : u_int64_t x, u_int64_t y)
1533 : {
1534 0 : rtsc->x = x;
1535 0 : rtsc->y = y;
1536 0 : rtsc->sm1 = isc->sm1;
1537 0 : rtsc->ism1 = isc->ism1;
1538 0 : rtsc->dx = isc->dx;
1539 0 : rtsc->dy = isc->dy;
1540 0 : rtsc->sm2 = isc->sm2;
1541 0 : rtsc->ism2 = isc->ism2;
1542 0 : }
1543 :
1544 : /*
1545 : * calculate the x-projection of the runtime service curve by the
1546 : * given y-projection value
1547 : */
1548 : u_int64_t
1549 0 : hfsc_rtsc_y2x(struct hfsc_runtime_sc *rtsc, u_int64_t y)
1550 : {
1551 : u_int64_t x;
1552 :
1553 0 : if (y < rtsc->y)
1554 0 : x = rtsc->x;
1555 0 : else if (y <= rtsc->y + rtsc->dy) {
1556 : /* x belongs to the 1st segment */
1557 0 : if (rtsc->dy == 0)
1558 0 : x = rtsc->x + rtsc->dx;
1559 : else
1560 0 : x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1561 : } else {
1562 : /* x belongs to the 2nd segment */
1563 0 : x = rtsc->x + rtsc->dx
1564 0 : + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1565 : }
1566 0 : return (x);
1567 : }
1568 :
1569 : u_int64_t
1570 0 : hfsc_rtsc_x2y(struct hfsc_runtime_sc *rtsc, u_int64_t x)
1571 : {
1572 : u_int64_t y;
1573 :
1574 0 : if (x <= rtsc->x)
1575 0 : y = rtsc->y;
1576 0 : else if (x <= rtsc->x + rtsc->dx)
1577 : /* y belongs to the 1st segment */
1578 0 : y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1579 : else
1580 : /* y belongs to the 2nd segment */
1581 0 : y = rtsc->y + rtsc->dy
1582 0 : + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1583 0 : return (y);
1584 : }
1585 :
1586 : /*
1587 : * update the runtime service curve by taking the minimum of the current
1588 : * runtime service curve and the service curve starting at (x, y).
1589 : */
1590 : void
1591 0 : hfsc_rtsc_min(struct hfsc_runtime_sc *rtsc, struct hfsc_internal_sc *isc,
1592 : u_int64_t x, u_int64_t y)
1593 : {
1594 : u_int64_t y1, y2, dx, dy;
1595 :
1596 0 : if (isc->sm1 <= isc->sm2) {
1597 : /* service curve is convex */
1598 : y1 = hfsc_rtsc_x2y(rtsc, x);
1599 0 : if (y1 < y)
1600 : /* the current rtsc is smaller */
1601 0 : return;
1602 0 : rtsc->x = x;
1603 0 : rtsc->y = y;
1604 0 : return;
1605 : }
1606 :
1607 : /*
1608 : * service curve is concave
1609 : * compute the two y values of the current rtsc
1610 : * y1: at x
1611 : * y2: at (x + dx)
1612 : */
1613 : y1 = hfsc_rtsc_x2y(rtsc, x);
1614 0 : if (y1 <= y) {
1615 : /* rtsc is below isc, no change to rtsc */
1616 0 : return;
1617 : }
1618 :
1619 0 : y2 = hfsc_rtsc_x2y(rtsc, x + isc->dx);
1620 0 : if (y2 >= y + isc->dy) {
1621 : /* rtsc is above isc, replace rtsc by isc */
1622 0 : rtsc->x = x;
1623 0 : rtsc->y = y;
1624 0 : rtsc->dx = isc->dx;
1625 0 : rtsc->dy = isc->dy;
1626 0 : return;
1627 : }
1628 :
1629 : /*
1630 : * the two curves intersect
1631 : * compute the offsets (dx, dy) using the reverse
1632 : * function of seg_x2y()
1633 : * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
1634 : */
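	/*
	 * derivation: expanding both sides with
	 * seg_x2y(dx, sm) = dx * sm >> SM_SHIFT gives
	 *	dx * sm1 >> SM_SHIFT = dx * sm2 >> SM_SHIFT + (y1 - y)
	 * and solving for dx yields the expression below.
	 */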
1635 0 : dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1636 : /*
1637 : * check if (x, y1) belongs to the 1st segment of rtsc.
1638 : * if so, add the offset.
1639 : */
1640 0 : if (rtsc->x + rtsc->dx > x)
1641 0 : dx += rtsc->x + rtsc->dx - x;
1642 0 : dy = seg_x2y(dx, isc->sm1);
1643 :
1644 0 : rtsc->x = x;
1645 0 : rtsc->y = y;
1646 0 : rtsc->dx = dx;
1647 0 : rtsc->dy = dy;
1648 0 : return;
1649 0 : }
1650 :
1651 : void
1652 0 : hfsc_getclstats(struct hfsc_class_stats *sp, struct hfsc_class *cl)
1653 : {
1654 0 : sp->class_id = cl->cl_id;
1655 0 : sp->class_handle = cl->cl_handle;
1656 :
1657 0 : if (cl->cl_rsc != NULL) {
1658 0 : sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1659 0 : sp->rsc.d = dx2d(cl->cl_rsc->dx);
1660 0 : sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1661 0 : } else {
1662 0 : sp->rsc.m1 = 0;
1663 0 : sp->rsc.d = 0;
1664 0 : sp->rsc.m2 = 0;
1665 : }
1666 0 : if (cl->cl_fsc != NULL) {
1667 0 : sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1668 0 : sp->fsc.d = dx2d(cl->cl_fsc->dx);
1669 0 : sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1670 0 : } else {
1671 0 : sp->fsc.m1 = 0;
1672 0 : sp->fsc.d = 0;
1673 0 : sp->fsc.m2 = 0;
1674 : }
1675 0 : if (cl->cl_usc != NULL) {
1676 0 : sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1677 0 : sp->usc.d = dx2d(cl->cl_usc->dx);
1678 0 : sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1679 0 : } else {
1680 0 : sp->usc.m1 = 0;
1681 0 : sp->usc.d = 0;
1682 0 : sp->usc.m2 = 0;
1683 : }
1684 :
1685 0 : sp->total = cl->cl_total;
1686 0 : sp->cumul = cl->cl_cumul;
1687 :
1688 0 : sp->d = cl->cl_d;
1689 0 : sp->e = cl->cl_e;
1690 0 : sp->vt = cl->cl_vt;
1691 0 : sp->f = cl->cl_f;
1692 :
1693 0 : sp->initvt = cl->cl_initvt;
1694 0 : sp->vtperiod = cl->cl_vtperiod;
1695 0 : sp->parentperiod = cl->cl_parentperiod;
1696 0 : sp->nactive = cl->cl_nactive;
1697 0 : sp->vtoff = cl->cl_vtoff;
1698 0 : sp->cvtmax = cl->cl_cvtmax;
1699 0 : sp->myf = cl->cl_myf;
1700 0 : sp->cfmin = cl->cl_cfmin;
1701 0 : sp->cvtmin = cl->cl_cvtmin;
1702 0 : sp->myfadj = cl->cl_myfadj;
1703 0 : sp->vtadj = cl->cl_vtadj;
1704 :
1705 0 : sp->cur_time = hfsc_microuptime();
1706 0 : sp->machclk_freq = HFSC_FREQ;
1707 :
1708 0 : sp->qlength = hfsc_class_qlength(cl);
1709 0 : sp->qlimit = cl->cl_q.qlimit;
1710 0 : sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1711 0 : sp->drop_cnt = cl->cl_stats.drop_cnt;
1712 0 : sp->period = cl->cl_stats.period;
1713 :
1714 0 : sp->qtype = 0;
1715 0 : }
1716 :
1717 : /* convert a class handle to the corresponding class pointer */
1718 : struct hfsc_class *
1719 0 : hfsc_clh2cph(struct hfsc_if *hif, u_int32_t chandle)
1720 : {
1721 : int i;
1722 : struct hfsc_class *cl;
1723 :
1724 0 : if (chandle == 0)
1725 0 : return (NULL);
1726 : /*
1727 : * first, try the slot corresponding to the lower bits of the handle.
1728 : * if it does not match, do the linear table search.
1729 : */
1730 0 : i = chandle % hif->hif_allocated;
1731 0 : if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1732 0 : return (cl);
1733 0 : for (i = 0; i < hif->hif_allocated; i++)
1734 0 : if ((cl = hif->hif_class_tbl[i]) != NULL &&
1735 0 : cl->cl_handle == chandle)
1736 0 : return (cl);
1737 0 : return (NULL);
1738 0 : }