Lines matching refs:pch (drivers/net/ppp/ppp_generic.c)

263 static void ppp_channel_push(struct channel *pch);
265 struct channel *pch);
272 struct channel *pch);
287 static int ppp_connect_channel(struct channel *pch, int unit);
288 static int ppp_disconnect_channel(struct channel *pch);
289 static void ppp_destroy_channel(struct channel *pch);
626 static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
628 write_lock_bh(&pch->upl);
629 if (pch->ppp ||
630 rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
631 write_unlock_bh(&pch->upl);
635 rcu_assign_pointer(pch->bridge, pchb);
636 write_unlock_bh(&pch->upl);
644 refcount_inc(&pch->file.refcnt);
645 rcu_assign_pointer(pchb->bridge, pch);
651 write_lock_bh(&pch->upl);
652 /* Re-read pch->bridge with upl held in case it was modified concurrently */
653 pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
654 RCU_INIT_POINTER(pch->bridge, NULL);
655 write_unlock_bh(&pch->upl);
665 static int ppp_unbridge_channels(struct channel *pch)
669 write_lock_bh(&pch->upl);
670 pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
672 write_unlock_bh(&pch->upl);
675 RCU_INIT_POINTER(pch->bridge, NULL);
676 write_unlock_bh(&pch->upl);
678 /* Only modify pchb if pchb->bridge points back to pch.
685 if (pchbb == pch)
691 if (pchbb == pch)
692 if (refcount_dec_and_test(&pch->file.refcnt))
693 ppp_destroy_channel(pch);
736 struct channel *pch, *pchb;
740 pch = PF_TO_CHANNEL(pf);
746 err = ppp_connect_channel(pch, unit);
750 err = ppp_disconnect_channel(pch);
768 err = ppp_bridge_channels(pch, pchb);
775 err = ppp_unbridge_channels(pch);
779 down_read(&pch->chan_sem);
780 chan = pch->chan;
784 up_read(&pch->chan_sem);
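
The block above (lines 736-784) is the channel side of ppp_ioctl(): PPPIOCCONNECT and PPPIOCDISCONN attach and detach the channel from a ppp unit, PPPIOCBRIDGECHAN and PPPIOCUNBRIDGECHAN call the bridge helpers from lines 626-693, and anything else is forwarded to the channel driver's own ops->ioctl under chan_sem. Below is a minimal user-space sketch of driving the bridge path, assuming the two channel indices were already obtained from the tunnel drivers; bridge_ppp_channels() is a made-up helper and error handling is kept to a minimum.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ppp-ioctl.h>

    /* Bridge channel A to channel B (kernel side: ppp_bridge_channels()
     * above sets both pch->bridge and pchb->bridge, so one ioctl is
     * enough).  Returns the /dev/ppp fd bound to channel A so the
     * caller can later issue PPPIOCUNBRIDGECHAN on it, or -1 on error.
     */
    static int bridge_ppp_channels(int chan_a, int chan_b)
    {
            int fd = open("/dev/ppp", O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, PPPIOCATTCHAN, &chan_a) < 0 ||    /* bind fd to channel A */
                ioctl(fd, PPPIOCBRIDGECHAN, &chan_b) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
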
1585 struct channel *pch;
1593 pch = list_first_entry(&ppp->channels, struct channel, clist);
1594 chan = pch->chan;
1862 struct channel *pch;
1879 pch = list_entry(list, struct channel, clist);
1881 spin_lock(&pch->downl);
1882 if (pch->chan) {
1883 if (pch->chan->ops->start_xmit(pch->chan, skb))
1890 spin_unlock(&pch->downl);
1926 struct channel *pch;
1940 list_for_each_entry(pch, &ppp->channels, clist) {
1941 if (pch->chan) {
1942 pch->avail = 1;
1944 pch->speed = pch->chan->speed;
1946 pch->avail = 0;
1948 if (pch->avail) {
1949 if (skb_queue_empty(&pch->file.xq) ||
1950 !pch->had_frag) {
1951 if (pch->speed == 0)
1954 totspeed += pch->speed;
1956 pch->avail = 2;
1960 if (!pch->had_frag && i < ppp->nxchan)
2003 pch = list_entry(list, struct channel, clist);
2005 if (!pch->avail)
2012 if (pch->avail == 1) {
2016 pch->avail = 1;
2020 spin_lock(&pch->downl);
2021 if (pch->chan == NULL) {
2023 if (pch->speed == 0)
2026 totspeed -= pch->speed;
2028 spin_unlock(&pch->downl);
2029 pch->avail = 0;
2046 if (pch->speed == 0) {
2054 ((totspeed*totfree)/pch->speed)) - hdrlen;
2056 flen += ((totfree - nzero)*pch->speed)/totspeed;
2057 nbigger -= ((totfree - nzero)*pch->speed)/
2078 pch->avail = 2;
2079 spin_unlock(&pch->downl);
2088 mtu = pch->chan->mtu - (hdrlen - 2);
2115 chan = pch->chan;
2116 if (!skb_queue_empty(&pch->file.xq) ||
2118 skb_queue_tail(&pch->file.xq, frag);
2119 pch->had_frag = 1;
2124 spin_unlock(&pch->downl);
2131 spin_unlock(&pch->downl);
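
Lines 1926-2131 are from ppp_mp_explode(), which splits one frame into fragments across the attached multilink channels; the arithmetic around lines 2046-2057 sizes each fragment in proportion to the member channel's share of the total speed. The helper below is a simplified model of that idea only, not the kernel's exact expression (which also accounts for zero-speed channels, free slots and per-fragment header overhead).

    /* Illustrative only: payload share for one multilink member,
     * proportional to its link speed.  Hypothetical helper, not kernel code.
     */
    static int mp_fragment_share(int totlen, int chan_speed, int totspeed)
    {
            if (totspeed <= 0 || chan_speed <= 0)
                    return totlen;  /* caller would fall back to an even split */
            return (int)(((long long)totlen * chan_speed) / totspeed);
    }
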
2141 static void __ppp_channel_push(struct channel *pch)
2146 spin_lock(&pch->downl);
2147 if (pch->chan) {
2148 while (!skb_queue_empty(&pch->file.xq)) {
2149 skb = skb_dequeue(&pch->file.xq);
2150 if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
2152 skb_queue_head(&pch->file.xq, skb);
2158 skb_queue_purge(&pch->file.xq);
2160 spin_unlock(&pch->downl);
2162 if (skb_queue_empty(&pch->file.xq)) {
2163 ppp = pch->ppp;
2169 static void ppp_channel_push(struct channel *pch)
2171 read_lock_bh(&pch->upl);
2172 if (pch->ppp) {
2173 (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
2174 __ppp_channel_push(pch);
2175 (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
2177 __ppp_channel_push(pch);
2179 read_unlock_bh(&pch->upl);
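
__ppp_channel_push() (lines 2141-2163) drains the channel's pending queue through chan->ops->start_xmit(); a zero return puts the skb back at the head of pch->file.xq and stops the loop until the driver signals room again. Below is a sketch of how a channel driver might honour that contract; struct mychan and its flow-control flag are assumptions for illustration, not a real driver.

    #include <linux/ppp_channel.h>
    #include <linux/skbuff.h>

    struct mychan {
            struct ppp_channel chan;        /* handle registered with ppp_generic */
            bool tx_busy;                   /* placeholder flow-control state */
    };

    static int mychan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb)
    {
            struct mychan *priv = chan->private;

            if (priv->tx_busy)
                    return 0;       /* not consumed: ppp_generic requeues the skb
                                     * and waits for ppp_output_wakeup() */

            /* Hand the frame to the real transport here; this sketch
             * simply frees it.  Nonzero means "consumed".
             */
            kfree_skb(skb);
            return 1;
    }
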
2193 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2197 ppp_receive_frame(ppp, skb, pch);
2248 static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
2253 pchb = rcu_dereference(pch->bridge);
2264 skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net));
2280 struct channel *pch = chan->ppp;
2283 if (!pch) {
2289 if (ppp_channel_bridge_input(pch, skb))
2292 read_lock_bh(&pch->upl);
2295 if (pch->ppp) {
2296 ++pch->ppp->dev->stats.rx_length_errors;
2297 ppp_receive_error(pch->ppp);
2303 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
2305 skb_queue_tail(&pch->file.rq, skb);
2307 while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
2308 (skb = skb_dequeue(&pch->file.rq)))
2310 wake_up_interruptible(&pch->file.rwait);
2312 ppp_do_recv(pch->ppp, skb, pch);
2316 read_unlock_bh(&pch->upl);
2323 struct channel *pch = chan->ppp;
2326 if (!pch)
2329 read_lock_bh(&pch->upl);
2330 if (pch->ppp) {
2335 ppp_do_recv(pch->ppp, skb, pch);
2338 read_unlock_bh(&pch->upl);
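
ppp_input() (line 2280 onward) and ppp_input_error() (line 2323 onward) are the receive-side entry points a channel driver calls: the first hands a decapsulated PPP frame (protocol field still present, HDLC address/control already stripped) to the bridge or unit, the second injects an error indication so the unit can reset its decompression state. A receive-path sketch for the same hypothetical driver; mychan_rx() and its bad-frame flag are assumptions.

    #include <linux/ppp_channel.h>
    #include <linux/skbuff.h>

    static void mychan_rx(struct ppp_channel *chan, struct sk_buff *skb,
                          bool frame_damaged)
    {
            if (frame_damaged) {
                    kfree_skb(skb);
                    ppp_input_error(chan, 0);  /* e.g. bad FCS; code is driver-chosen */
                    return;
            }
            ppp_input(chan, skb);              /* reaches ppp_input()/ppp_do_recv() above */
    }
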
2346 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2354 ppp_receive_mp_frame(ppp, skb, pch);
2590 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2628 pch->lastseq = seq;
2879 struct channel *pch;
2882 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2883 if (!pch)
2888 pch->ppp = NULL;
2889 pch->chan = chan;
2890 pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
2891 chan->ppp = pch;
2892 init_ppp_file(&pch->file, CHANNEL);
2893 pch->file.hdrlen = chan->hdrlen;
2895 pch->lastseq = -1;
2897 init_rwsem(&pch->chan_sem);
2898 spin_lock_init(&pch->downl);
2899 rwlock_init(&pch->upl);
2902 pch->file.index = ++pn->last_channel_index;
2903 list_add(&pch->list, &pn->new_channels);
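
Lines 2879-2903 are ppp_register_net_channel(): the generic layer allocates its struct channel, points chan->ppp at it, takes a netns reference and parks the channel on the per-namespace new_channels list until user space connects it. Seen from the driver side, registration looks roughly like the sketch below; the mychan type is the illustrative one from the xmit sketch above and the field values are examples only.

    #include <linux/ppp_channel.h>
    #include <net/net_namespace.h>

    static int mychan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb);

    static const struct ppp_channel_ops mychan_ops = {
            .start_xmit = mychan_start_xmit,        /* xmit sketch above */
    };

    static int mychan_register(struct net *net, struct mychan *priv)
    {
            priv->chan.private = priv;
            priv->chan.ops     = &mychan_ops;
            priv->chan.mtu     = 1492;      /* example value (PPPoE-sized) */
            priv->chan.hdrlen  = 0;         /* extra headroom this driver needs */

            /* Allocates the struct channel above and sets chan->ppp = pch. */
            return ppp_register_net_channel(net, &priv->chan);
    }
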
2915 struct channel *pch = chan->ppp;
2917 if (pch)
2918 return pch->file.index;
2927 struct channel *pch = chan->ppp;
2930 if (pch) {
2931 read_lock_bh(&pch->upl);
2932 if (pch->ppp)
2933 unit = pch->ppp->file.index;
2934 read_unlock_bh(&pch->upl);
2944 struct channel *pch = chan->ppp;
2947 if (pch) {
2948 read_lock_bh(&pch->upl);
2949 if (pch->ppp && pch->ppp->dev)
2950 name = pch->ppp->dev->name;
2951 read_unlock_bh(&pch->upl);
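
ppp_channel_index(), ppp_unit_number() and ppp_dev_name() (lines 2915-2951) are query helpers for channel drivers; the latter two return -1 and NULL respectively while the channel is not connected to a unit. A small illustrative use, with mychan_log_attachment() being a made-up helper:

    #include <linux/ppp_channel.h>
    #include <linux/printk.h>

    static void mychan_log_attachment(struct ppp_channel *chan)
    {
            const char *dev = ppp_dev_name(chan);   /* NULL until connected to a unit */

            pr_info("ppp channel %d: unit %d, interface %s\n",
                    ppp_channel_index(chan), ppp_unit_number(chan),
                    dev ? dev : "(none)");
    }
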
2964 struct channel *pch = chan->ppp;
2967 if (!pch)
2976 down_write(&pch->chan_sem);
2977 spin_lock_bh(&pch->downl);
2978 pch->chan = NULL;
2979 spin_unlock_bh(&pch->downl);
2980 up_write(&pch->chan_sem);
2981 ppp_disconnect_channel(pch);
2983 pn = ppp_pernet(pch->chan_net);
2985 list_del(&pch->list);
2988 ppp_unbridge_channels(pch);
2990 pch->file.dead = 1;
2991 wake_up_interruptible(&pch->file.rwait);
2993 if (refcount_dec_and_test(&pch->file.refcnt))
2994 ppp_destroy_channel(pch);
3004 struct channel *pch = chan->ppp;
3006 if (!pch)
3008 ppp_channel_push(pch);
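
ppp_output_wakeup() (handled at lines 3004-3008) is the companion to the start_xmit contract: once the transport has room again the driver calls it, and ppp_channel_push() re-drains pch->file.xq under the upl/xmit_recursion protection shown at lines 2169-2179. Sketch, continuing the same hypothetical driver:

    #include <linux/ppp_channel.h>

    /* Called from the driver's TX-completion or flow-control path;
     * priv is the illustrative mychan from the xmit sketch above.
     */
    static void mychan_tx_done(struct mychan *priv)
    {
            priv->tx_busy = false;
            ppp_output_wakeup(&priv->chan); /* runs ppp_channel_push() above */
    }
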
3429 struct channel *pch;
3431 list_for_each_entry(pch, &pn->new_channels, list) {
3432 if (pch->file.index == unit) {
3433 list_move(&pch->list, &pn->all_channels);
3434 return pch;
3438 list_for_each_entry(pch, &pn->all_channels, list) {
3439 if (pch->file.index == unit)
3440 return pch;
3450 ppp_connect_channel(struct channel *pch, int unit)
3457 pn = ppp_pernet(pch->chan_net);
3463 write_lock_bh(&pch->upl);
3465 if (pch->ppp ||
3466 rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
3470 spin_lock_bh(&pch->downl);
3471 if (!pch->chan) {
3473 spin_unlock_bh(&pch->downl);
3478 spin_unlock_bh(&pch->downl);
3479 if (pch->file.hdrlen > ppp->file.hdrlen)
3480 ppp->file.hdrlen = pch->file.hdrlen;
3481 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
3484 list_add_tail(&pch->clist, &ppp->channels);
3486 pch->ppp = ppp;
3492 write_unlock_bh(&pch->upl);
3502 ppp_disconnect_channel(struct channel *pch)
3507 write_lock_bh(&pch->upl);
3508 ppp = pch->ppp;
3509 pch->ppp = NULL;
3510 write_unlock_bh(&pch->upl);
3514 list_del(&pch->clist);
3528 static void ppp_destroy_channel(struct channel *pch)
3530 put_net_track(pch->chan_net, &pch->ns_tracker);
3531 pch->chan_net = NULL;
3535 if (!pch->file.dead) {
3537 pr_err("ppp: destroying undead channel %p !\n", pch);
3540 skb_queue_purge(&pch->file.xq);
3541 skb_queue_purge(&pch->file.rq);
3542 kfree(pch);
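
Finally, lines 2964-2994 and 3528-3542 cover unregistration and destruction: ppp_unregister_channel() clears pch->chan under chan_sem and downl so no transmit path races with a disappearing driver, detaches the channel from any unit or bridge, marks the file dead and drops the reference that may free the struct channel in ppp_destroy_channel(). The driver side is correspondingly small; sketch, with mychan as above:

    #include <linux/ppp_channel.h>
    #include <linux/slab.h>

    static void mychan_unregister(struct mychan *priv)
    {
            /* After this returns, ppp_generic will no longer call
             * mychan_start_xmit() for this channel (pch->chan is NULL).
             */
            ppp_unregister_channel(&priv->chan);
            kfree(priv);
    }
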