Lines matching refs:lr (struct if_llreach link-layer reachability code)

220 iflr_reachable(struct if_llreach *lr, int cmp_delta, u_int64_t tval)
229 expire = lr->lr_lastrcvd + lr->lr_reachable;
243 return ((expire >= now) && (now - tval) < lr->lr_reachable);
247 ifnet_llreach_reachable(struct if_llreach *lr)
252 return (iflr_reachable(lr, 0, 0));
256 ifnet_llreach_reachable_delta(struct if_llreach *lr, u_int64_t tval)
261 return (iflr_reachable(lr, 1, tval));
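
The test above (220-243) treats a neighbor as reachable while the current uptime still falls inside the window that opened at lr_lastrcvd and spans lr_reachable seconds; the cmp_delta variant additionally requires the caller-supplied timestamp tval to be recent enough. The two wrappers (247-261) select between the plain and delta forms. A minimal userland model of that logic: the delta return mirrors line 243, the plain form is inferred from the two wrappers, and all other names are illustrative, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Userland model of the reachability test: a peer is reachable
     * while "now" is inside the window [lastrcvd, lastrcvd+reachable].
     * The delta form also requires tval to be within one window of now.
     * (Illustrative names; not the kernel structures.)
     */
    struct llreach_model {
        uint64_t lastrcvd;   /* uptime when the peer was last heard */
        uint64_t reachable;  /* width of the window, in seconds */
    };

    static int
    model_reachable(const struct llreach_model *lr, int cmp_delta,
        uint64_t tval, uint64_t now)
    {
        uint64_t expire = lr->lastrcvd + lr->reachable;

        if (!cmp_delta)
            return (expire >= now);        /* plain form (inferred) */
        /* delta form, mirroring line 243 */
        return ((expire >= now) && (now - tval) < lr->reachable);
    }

    int
    main(void)
    {
        struct llreach_model lr = { .lastrcvd = 100, .reachable = 30 };

        printf("%d\n", model_reachable(&lr, 0, 0, 120));  /* 1: window open */
        printf("%d\n", model_reachable(&lr, 0, 0, 131));  /* 0: window closed */
        printf("%d\n", model_reachable(&lr, 1, 95, 120)); /* 1: delta 25 < 30 */
        printf("%d\n", model_reachable(&lr, 1, 80, 120)); /* 0: delta 40 >= 30 */
        return (0);
    }
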
268 struct if_llreach find, *lr;
276 lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
277 if (lr == NULL) {
284 lr->lr_lastrcvd = net_uptime();
292 struct if_llreach find, *lr;
304 lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
305 if (lr != NULL) {
307 IFLR_LOCK(lr);
308 VERIFY(lr->lr_reqcnt >= 1);
309 lr->lr_reqcnt++;
310 VERIFY(lr->lr_reqcnt != 0);
311 IFLR_ADDREF_LOCKED(lr); /* for caller */
312 lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */
313 IFLR_UNLOCK(lr);
315 return (lr);
324 lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find);
325 if (lr != NULL)
328 lr = iflr_alloc(M_WAITOK);
329 if (lr == NULL) {
333 IFLR_LOCK(lr);
334 lr->lr_reqcnt++;
335 VERIFY(lr->lr_reqcnt == 1);
336 IFLR_ADDREF_LOCKED(lr); /* for RB tree */
337 IFLR_ADDREF_LOCKED(lr); /* for caller */
338 lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */
339 lr->lr_baseup = lr->lr_lastrcvd; /* base uptime */
341 lr->lr_basecal = now.tv_sec; /* base calendar time */
342 lr->lr_basereachable = llreach_base;
343 lr->lr_reachable = LL_COMPUTE_RTIME(lr->lr_basereachable * 1000);
344 lr->lr_debug |= IFD_ATTACHED;
345 lr->lr_ifp = ifp;
346 lr->lr_key.proto = llproto;
347 bcopy(addr, &lr->lr_key.addr, IF_LLREACH_MAXLEN);
348 lr->lr_rssi = IFNET_RSSI_UNKNOWN;
349 lr->lr_lqm = IFNET_LQM_THRESH_UNKNOWN;
350 lr->lr_npm = IFNET_NPM_THRESH_UNKNOWN;
351 RB_INSERT(ll_reach_tree, &ifp->if_ll_srcs, lr);
352 IFLR_UNLOCK(lr);
355 return (lr);
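
Lines 268-284 and 292-355 are the two lookup paths: a refresh path (268-284) that finds the entry and stamps lr_lastrcvd, and the find-or-create path (292-355). Note the second RB_FIND at 324 after the first at 304: this is consistent with upgrading the per-interface lock from reader to writer around the blocking allocation and re-checking, since another thread may have inserted the same key in between. On a hit the entry gains a use (lr_reqcnt) plus a caller reference; on a miss a fresh entry starts with reqcnt 1 and two references, one for the tree and one for the caller. Below is a compressed, single-threaded sketch of that shape, assuming a BSD-style <sys/tree.h> (present on macOS and the BSDs); every name in it is illustrative.

    #include <sys/tree.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Find-or-create over a red-black tree keyed by (proto, address),
     * modeling the two-counter initialization seen at lines 334-337:
     * reqcnt counts active users, refcnt counts references that keep
     * the memory alive.  (Illustrative names throughout.)
     */
    struct node {
        RB_ENTRY(node) link;
        uint16_t proto;      /* search key, part 1 */
        uint8_t  addr[6];    /* search key, part 2 (link-layer addr) */
        uint32_t reqcnt;     /* active requests against this entry */
        uint32_t refcnt;     /* references keeping the memory alive */
    };

    static int
    node_cmp(struct node *a, struct node *b)
    {
        if (a->proto != b->proto)
            return (a->proto < b->proto ? -1 : 1);
        return (memcmp(a->addr, b->addr, sizeof (a->addr)));
    }

    RB_HEAD(node_tree, node) head = RB_INITIALIZER(&head);
    RB_GENERATE(node_tree, node, link, node_cmp)

    static struct node *
    lookup_or_create(uint16_t proto, const uint8_t addr[6])
    {
        struct node find, *n;

        find.proto = proto;
        memcpy(find.addr, addr, sizeof (find.addr));
        n = RB_FIND(node_tree, &head, &find);
        if (n != NULL) {
            n->reqcnt++;        /* existing entry: one more user */
            n->refcnt++;        /* reference handed to the caller */
            return (n);
        }
        n = calloc(1, sizeof (*n));
        if (n == NULL)
            return (NULL);
        n->proto = proto;
        memcpy(n->addr, addr, sizeof (n->addr));
        n->reqcnt = 1;          /* first user */
        n->refcnt = 2;          /* one for the tree, one for the caller */
        RB_INSERT(node_tree, &head, n);
        return (n);
    }

    int
    main(void)
    {
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        struct node *a = lookup_or_create(0x0800, mac);
        struct node *b = lookup_or_create(0x0800, mac);

        /* Same key twice: the second call finds the first entry. */
        printf("same=%d reqcnt=%u refcnt=%u\n", a == b, b->reqcnt, b->refcnt);
        return (0);
    }
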
359 ifnet_llreach_free(struct if_llreach *lr)
364 ifp = lr->lr_ifp;
367 IFLR_LOCK(lr);
368 if (lr->lr_reqcnt == 0) {
369 panic("%s: lr=%p negative reqcnt", __func__, lr);
372 --lr->lr_reqcnt;
373 if (lr->lr_reqcnt > 0) {
374 IFLR_UNLOCK(lr);
376 IFLR_REMREF(lr); /* for caller */
379 if (!(lr->lr_debug & IFD_ATTACHED)) {
380 panic("%s: Attempt to detach an unattached llreach lr=%p",
381 __func__, lr);
384 lr->lr_debug &= ~IFD_ATTACHED;
385 RB_REMOVE(ll_reach_tree, &ifp->if_ll_srcs, lr);
386 IFLR_UNLOCK(lr);
389 IFLR_REMREF(lr); /* for RB tree */
390 IFLR_REMREF(lr); /* for caller */
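
The free path (359-390) is the mirror image: only when the last lr_reqcnt is dropped does the entry leave the tree (after the IFD_ATTACHED sanity check), surrendering the tree's reference together with the caller's; a non-final drop returns just the caller's reference. A standalone model of the two-counter teardown, with abort() standing in for panic() and illustrative names throughout:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Two-counter teardown: reqcnt counts active users, refcnt counts
     * references that keep the memory alive.  Dropping the last reqcnt
     * "detaches" the entry and gives back the tree's reference as well
     * as the caller's; the memory is freed when refcnt reaches zero.
     */
    struct entry {
        uint32_t reqcnt;
        uint32_t refcnt;
        int attached;       /* models the IFD_ATTACHED flag */
    };

    static void
    entry_remref(struct entry *e)
    {
        if (e->refcnt == 0)
            abort();                    /* negative refcnt: caller bug */
        if (--e->refcnt == 0) {
            printf("freeing entry\n");  /* models the final free */
            free(e);
        }
    }

    static void
    entry_release(struct entry *e)
    {
        if (e->reqcnt == 0)
            abort();                    /* negative reqcnt: caller bug */
        if (--e->reqcnt > 0) {
            entry_remref(e);            /* for caller */
            return;
        }
        if (!e->attached)
            abort();                    /* detach of unattached entry */
        e->attached = 0;                /* models RB_REMOVE() */
        entry_remref(e);                /* for the tree */
        entry_remref(e);                /* for the caller */
    }

    int
    main(void)
    {
        struct entry *e = calloc(1, sizeof (*e));

        if (e == NULL)
            return (1);
        e->reqcnt = 2;      /* two users */
        e->refcnt = 3;      /* tree + two callers */
        e->attached = 1;

        entry_release(e);   /* first user: reqcnt 1, refcnt 2 */
        entry_release(e);   /* last user: detach, refcnt hits 0, freed */
        return (0);
    }
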
394 ifnet_llreach_up2calexp(struct if_llreach *lr, u_int64_t uptime)
409 lr->lr_basecal += (cnow.tv_sec - lr->lr_basecal) -
410 (unow - lr->lr_baseup);
412 calendar = lr->lr_basecal + lr->lr_reachable +
413 (uptime - lr->lr_baseup);
420 ifnet_llreach_up2upexp(struct if_llreach *lr, u_int64_t uptime)
422 return (lr->lr_reachable + uptime);
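
Lines 394-422 convert stored uptime stamps into expiry times: each entry keeps a base pair (lr_baseup, lr_basecal) captured at creation, lines 409-410 re-sync the calendar base when the wall clock has been stepped relative to the monotonic clock, and lines 412-413 map a stored uptime to a calendar expiry by adding the window plus the uptime delta. The uptime-based variant (420-422) needs no base pair at all. A standalone model of the two conversions (the resync step is omitted; names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Uptime-to-calendar conversion: remember a (base uptime, base
     * calendar) pair, then map any stored uptime stamp to calendar
     * time via the same offset, plus the reachability window to turn
     * "last received" into "expires at".
     */
    struct clockbase {
        uint64_t baseup;     /* uptime at creation */
        uint64_t basecal;    /* calendar secs since epoch at creation */
        uint64_t reachable;  /* reachability window, seconds */
    };

    static uint64_t
    up2calexp(const struct clockbase *cb, uint64_t uptime)
    {
        /* calendar expiry = base calendar + window + uptime delta */
        return (cb->basecal + cb->reachable + (uptime - cb->baseup));
    }

    static uint64_t
    up2upexp(const struct clockbase *cb, uint64_t uptime)
    {
        /* uptime expiry: just push the stamp out by the window */
        return (cb->reachable + uptime);
    }

    int
    main(void)
    {
        /* e.g. created 1000s after boot, at calendar time 1700000000 */
        struct clockbase cb = {
            .baseup = 1000, .basecal = 1700000000, .reachable = 30,
        };

        /* last packet at uptime 1250 -> calendar expiry 1700000280 */
        printf("cal expiry: %llu\n",
            (unsigned long long)up2calexp(&cb, 1250));
        printf("uptime expiry: %llu\n",
            (unsigned long long)up2upexp(&cb, 1250));
        return (0);
    }
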
478 struct if_llreach *lr;
480 lr = (how == M_WAITOK) ? zalloc(iflr_zone) : zalloc_noblock(iflr_zone);
481 if (lr != NULL) {
482 bzero(lr, iflr_size);
483 lck_mtx_init(&lr->lr_lock, ifnet_lock_group, ifnet_lock_attr);
484 lr->lr_debug |= IFD_ALLOC;
486 return (lr);
490 iflr_free(struct if_llreach *lr)
492 IFLR_LOCK(lr);
493 if (lr->lr_debug & IFD_ATTACHED) {
494 panic("%s: attached lr=%p is being freed", __func__, lr);
496 } else if (!(lr->lr_debug & IFD_ALLOC)) {
497 panic("%s: lr %p cannot be freed", __func__, lr);
499 } else if (lr->lr_refcnt != 0) {
500 panic("%s: non-zero refcount lr=%p", __func__, lr);
502 } else if (lr->lr_reqcnt != 0) {
503 panic("%s: non-zero reqcnt lr=%p", __func__, lr);
506 lr->lr_debug &= ~IFD_ALLOC;
507 IFLR_UNLOCK(lr);
509 lck_mtx_destroy(&lr->lr_lock, ifnet_lock_group);
510 zfree(iflr_zone, lr);
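
The allocator pair (478-510) wraps the zone allocator in a debug-flag discipline: IFD_ALLOC marks memory handed out by the zone, IFD_ATTACHED marks tree membership, and the free path panics on an attached entry, a stray pointer, or outstanding counts. A malloc-backed sketch of the same checks, with assert() in place of panic() and hypothetical DBG_* flag names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    /*
     * Debug-flag discipline around alloc/free: ALLOC is set when the
     * memory leaves the allocator, ATTACHED while the entry is in the
     * tree.  The free path refuses entries that are still attached,
     * were never allocated through this path, or still have counts.
     */
    #define DBG_ALLOC     0x1
    #define DBG_ATTACHED  0x2

    struct entry {
        uint32_t debug;
        uint32_t refcnt;
        uint32_t reqcnt;
    };

    static struct entry *
    entry_alloc(void)
    {
        struct entry *e = calloc(1, sizeof (*e));

        if (e != NULL)
            e->debug |= DBG_ALLOC;
        return (e);
    }

    static void
    entry_free(struct entry *e)
    {
        assert(!(e->debug & DBG_ATTACHED));  /* still in the tree */
        assert(e->debug & DBG_ALLOC);        /* double free / stray ptr */
        assert(e->refcnt == 0);              /* references outstanding */
        assert(e->reqcnt == 0);              /* users outstanding */
        e->debug &= ~DBG_ALLOC;
        free(e);
    }

    int
    main(void)
    {
        struct entry *e = entry_alloc();

        if (e == NULL)
            return (1);
        entry_free(e);   /* clean: no flags or counts outstanding */
        return (0);
    }
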
514 iflr_addref(struct if_llreach *lr, int locked)
517 IFLR_LOCK(lr);
519 IFLR_LOCK_ASSERT_HELD(lr);
521 if (++lr->lr_refcnt == 0) {
522 panic("%s: lr=%p wraparound refcnt", __func__, lr);
526 IFLR_UNLOCK(lr);
530 iflr_remref(struct if_llreach *lr)
532 IFLR_LOCK(lr);
533 if (lr->lr_refcnt == 0) {
534 panic("%s: lr=%p negative refcnt", __func__, lr);
537 --lr->lr_refcnt;
538 if (lr->lr_refcnt > 0) {
539 IFLR_UNLOCK(lr);
542 IFLR_UNLOCK(lr);
544 iflr_free(lr); /* deallocate it */
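
The reference-count pair (514-544) guards both ends: the increment traps unsigned wraparound (++refcnt landing on 0, line 521) and the decrement traps underflow (line 533), with the transition to zero handing the entry to the free routine. A standalone model, abort() standing in for panic() and illustrative names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Refcount sanity checks: overflow trapped on increment, underflow
     * on decrement; only the drop to zero frees the object.
     */
    struct obj {
        uint32_t refcnt;
    };

    static void
    obj_addref(struct obj *o)
    {
        if (++o->refcnt == 0)
            abort();   /* wraparound: too many references taken */
    }

    static void
    obj_remref(struct obj *o)
    {
        if (o->refcnt == 0)
            abort();   /* underflow: release without a reference */
        if (--o->refcnt > 0)
            return;
        printf("last reference dropped; freeing\n");
        free(o);
    }

    int
    main(void)
    {
        struct obj *o = calloc(1, sizeof (*o));

        if (o == NULL)
            return (1);
        obj_addref(o);
        obj_addref(o);
        obj_remref(o);
        obj_remref(o);   /* prints, then frees */
        return (0);
    }
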
548 ifnet_lr2ri(struct if_llreach *lr, struct rt_reach_info *ri)
552 IFLR_LOCK_ASSERT_HELD(lr);
555 ifnet_lr2lri(lr, &lri);
565 ifnet_lr2iflri(struct if_llreach *lr, struct ifnet_llreach_info *iflri)
567 IFLR_LOCK_ASSERT_HELD(lr);
573 iflri->iflri_refcnt = lr->lr_reqcnt;
574 iflri->iflri_probes = lr->lr_probes;
575 iflri->iflri_rcv_expire = ifnet_llreach_up2upexp(lr, lr->lr_lastrcvd);
577 switch (lr->lr_key.proto) {
593 bcopy(&lr->lr_key.addr, &iflri->iflri_addr, IF_LLREACH_MAXLEN);
594 iflri->iflri_rssi = lr->lr_rssi;
595 iflri->iflri_lqm = lr->lr_lqm;
596 iflri->iflri_npm = lr->lr_npm;
600 ifnet_lr2lri(struct if_llreach *lr, struct if_llreach_info *lri)
602 IFLR_LOCK_ASSERT_HELD(lr);
608 lri->lri_refcnt = lr->lr_reqcnt;
609 lri->lri_ifindex = lr->lr_ifp->if_index;
610 lri->lri_probes = lr->lr_probes;
611 lri->lri_expire = ifnet_llreach_up2calexp(lr, lr->lr_lastrcvd);
612 lri->lri_proto = lr->lr_key.proto;
613 bcopy(&lr->lr_key.addr, &lri->lri_addr, IF_LLREACH_MAXLEN);
614 lri->lri_rssi = lr->lr_rssi;
615 lri->lri_lqm = lr->lr_lqm;
616 lri->lri_npm = lr->lr_npm;
626 struct if_llreach *lr;
655 RB_FOREACH(lr, ll_reach_tree, &ifp->if_ll_srcs) {
657 IFLR_LOCK(lr);
658 ifnet_lr2lri(lr, &lri);
659 IFLR_UNLOCK(lr);
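
The export routines (548-616) flatten a locked entry into flat info structs (rt_reach_info, ifnet_llreach_info, if_llreach_info), asserting the per-entry lock is held on entry, and the walk at 655-659 applies this to every entry in the tree, taking and dropping each entry's lock around the copy, presumably on behalf of a sysctl-style reader. A sketch of the snapshot-under-lock pattern, with a pthread mutex standing in for the IFLR lock and illustrative struct names:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Snapshot a live, locked entry into a stable lock-free copy, then
     * drop the lock before handing the copy onward.  One entry stands
     * in for the RB_FOREACH walk over the whole tree.
     */
    struct entry {
        pthread_mutex_t lock;
        uint32_t reqcnt;
        uint64_t probes;
        uint64_t lastrcvd;
        int32_t  rssi;
    };

    struct entry_info {           /* stable, lock-free snapshot */
        uint32_t refcnt;
        uint64_t probes;
        uint64_t lastrcvd;
        int32_t  rssi;
    };

    static void
    entry2info(struct entry *e, struct entry_info *info)
    {
        /* caller holds e->lock, mirroring the LOCK_ASSERT_HELD */
        memset(info, 0, sizeof (*info));
        info->refcnt = e->reqcnt;
        info->probes = e->probes;
        info->lastrcvd = e->lastrcvd;
        info->rssi = e->rssi;
    }

    int
    main(void)
    {
        struct entry e = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .reqcnt = 1, .probes = 4, .lastrcvd = 1250, .rssi = -55,
        };
        struct entry_info info;

        pthread_mutex_lock(&e.lock);
        entry2info(&e, &info);
        pthread_mutex_unlock(&e.lock);

        printf("probes=%llu rssi=%d\n",
            (unsigned long long)info.probes, (int)info.rssi);
        return (0);
    }
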