Lines Matching defs:shdlc (net/nfc/hci/llc_shdlc.c, the Linux NFC HCI SHDLC link layer control)

3  * shdlc Link Layer Control
8 #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
104 print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
126 static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc,
131 skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
132 shdlc->tx_tailroom + payload_len, GFP_KERNEL);
134 skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
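
The alloc_skb()/skb_reserve() pair above uses the usual headroom trick: one allocation covers the driver's headroom, the LLC's own control byte, the payload and the tailroom, and the data pointer is then moved forward so the control byte (and later the driver header) can be prepended with skb_push() without copying. A minimal userspace sketch of that arithmetic, with hypothetical sizes and a flat buffer standing in for struct sk_buff:

    #include <stdio.h>
    #include <stdlib.h>

    /* flat-buffer stand-in for struct sk_buff with a movable data pointer */
    struct buf {
        unsigned char *head, *data, *tail;
    };

    static struct buf *buf_alloc(size_t size)
    {
        struct buf *b = malloc(sizeof(*b));

        if (!b)
            return NULL;
        b->head = calloc(1, size);
        if (!b->head) {
            free(b);
            return NULL;
        }
        b->data = b->tail = b->head;
        return b;
    }

    /* skb_reserve() analogue: skip headroom before any data is written */
    static void buf_reserve(struct buf *b, size_t len)
    {
        b->data += len;
        b->tail += len;
    }

    /* skb_push() analogue: claim bytes in front of the current data */
    static unsigned char *buf_push(struct buf *b, size_t len)
    {
        b->data -= len;
        return b->data;
    }

    int main(void)
    {
        /* hypothetical sizes; llc_head_room stands in for SHDLC_LLC_HEAD_ROOM */
        size_t tx_headroom = 4, llc_head_room = 1, payload = 2, tx_tailroom = 2;
        struct buf *b = buf_alloc(tx_headroom + llc_head_room + payload + tx_tailroom);

        if (!b)
            return 1;
        buf_reserve(b, tx_headroom + llc_head_room);
        *buf_push(b, 1) = 0x80;                       /* LLC control byte */
        printf("driver headroom left: %td\n", b->data - b->head); /* 4 */
        free(b->head);
        free(b);
        return 0;
    }
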
140 static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc,
148 skb = llc_shdlc_alloc_skb(shdlc, 0);
154 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
162 static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc,
172 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
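
llc_shdlc_send_s_frame() and llc_shdlc_send_u_frame() differ only in the control byte they push in front of the (possibly empty) payload before handing the skb to xmit_to_drv(). A sketch of the S-frame control byte, using the head value and sframe_type numbering from the kernel source:

    #include <stdint.h>
    #include <stdio.h>

    #define SHDLC_CONTROL_HEAD_S 0xc0

    /* numbering as in the kernel's sframe_type enum */
    enum sframe_type { S_FRAME_RR, S_FRAME_REJ, S_FRAME_RNR, S_FRAME_SREJ };

    /* the byte llc_shdlc_send_s_frame() pushes in front of the payload:
     * 0b110 head, S-frame type in bits 4-3, N(R) in bits 2-0 */
    static uint8_t s_frame_control(enum sframe_type type, int nr)
    {
        return SHDLC_CONTROL_HEAD_S | (type << 3) | (nr & 0x07);
    }

    int main(void)
    {
        /* RR(5): "ready, next expected frame is 5" */
        printf("RR(5)  = 0x%02x\n", s_frame_control(S_FRAME_RR, 5));  /* 0xc5 */
        /* REJ(2): "retransmit from frame 2" */
        printf("REJ(2) = 0x%02x\n", s_frame_control(S_FRAME_REJ, 2)); /* 0xca */
        return 0;
    }
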
183 static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
186 int dnr = shdlc->dnr; /* MUST initially be < y_nr */
193 skb = skb_dequeue(&shdlc->ack_pending_q);
199 if (skb_queue_empty(&shdlc->ack_pending_q)) {
200 if (shdlc->t2_active) {
201 del_timer_sync(&shdlc->t2_timer);
202 shdlc->t2_active = false;
207 skb = skb_peek(&shdlc->ack_pending_q);
209 mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
211 shdlc->t2_active = true;
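
llc_shdlc_reset_t2() trims the acked frames off ack_pending_q, then re-arms T2 (the retransmission guard) from the send timestamp the transmit path stored in skb->cb, so the timer always tracks the oldest frame still awaiting an ack. A minimal sketch of that rebasing, with a plain ring standing in for the queue and an illustrative T2_MS:

    #include <stdbool.h>
    #include <stdio.h>

    #define T2_MS 300 /* illustrative; the kernel uses SHDLC_T2_VALUE_MS */

    struct pending { unsigned long sent_at; };

    /* y_nr acks in-flight frames dnr..y_nr-1 modulo 8: drop them, then
     * aim T2 at the oldest survivor's send time */
    static void reset_t2(struct pending *q, int *head, int *count, int dnr,
                         int y_nr, bool *t2_active, unsigned long *deadline)
    {
        while (dnr != y_nr && *count > 0) {
            *head = (*head + 1) % 8;   /* free the acked frame at the head */
            (*count)--;
            dnr = (dnr + 1) % 8;
        }
        if (*count == 0) {
            *t2_active = false;                   /* nothing left in flight */
        } else {
            *deadline = q[*head].sent_at + T2_MS; /* oldest unacked frame */
            *t2_active = true;
        }
    }

    int main(void)
    {
        struct pending q[8] = { {100}, {150}, {200} }; /* frames ns = 0, 1, 2 */
        int head = 0, count = 3;
        bool t2_active = true;
        unsigned long deadline = 0;

        reset_t2(q, &head, &count, 0 /* dnr */, 2 /* y_nr */,
                 &t2_active, &deadline);
        printf("pending=%d deadline=%lu\n", count, deadline); /* 1, 500 */
        return 0;
    }
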
221 static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
229 if (shdlc->state != SHDLC_CONNECTED)
232 if (x_ns != shdlc->nr) {
233 llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
237 if (!shdlc->t1_active) {
238 shdlc->t1_active = true;
239 mod_timer(&shdlc->t1_timer, jiffies +
240 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
245 shdlc->rcv_to_hci(shdlc->hdev, skb);
249 shdlc->nr = (shdlc->nr + 1) % 8;
251 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
252 llc_shdlc_reset_t2(shdlc, y_nr);
254 shdlc->dnr = y_nr;
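
The receive side above accepts only the in-order frame: when x_ns matches the expected nr the payload goes up to HCI and nr advances modulo 8, otherwise REJ(nr) asks for go-back-N retransmission. The T1 timer armed here delays the RR ack so it can piggyback on outgoing I-frames. A compact sketch of that dispatch:

    #include <stdio.h>

    struct rx { int nr; }; /* next expected N(S) */

    /* sketch of the in-order check in llc_shdlc_rcv_i_frame() */
    static void rcv_i_frame(struct rx *r, int x_ns)
    {
        if (x_ns != r->nr) {
            printf("out of order: got ns=%d, want %d -> send REJ(%d)\n",
                   x_ns, r->nr, r->nr);
            return;
        }
        printf("deliver frame %d to HCI\n", x_ns);
        r->nr = (r->nr + 1) % 8; /* advance the expected sequence number */
    }

    int main(void)
    {
        struct rx r = { .nr = 0 };

        rcv_i_frame(&r, 0); /* delivered */
        rcv_i_frame(&r, 2); /* frame 1 was lost -> REJ(1) */
        return 0;
    }
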
261 static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
265 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
266 llc_shdlc_reset_t2(shdlc, y_nr);
267 shdlc->dnr = y_nr;
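
Both llc_shdlc_rcv_i_frame() and llc_shdlc_rcv_ack() gate the ack on llc_shdlc_x_lt_y_lteq_z(dnr, y_nr, ns): a received N(R) only acks anything if it falls inside the in-flight window on the modulo-8 sequence circle. The predicates below restate the kernel's helpers (the window never spans more than 8 values, so the wrapped case is unambiguous) with a small self-test:

    #include <stdbool.h>
    #include <stdio.h>

    /* x < y <= z on the modulo-8 circle, as llc_shdlc_x_lt_y_lteq_z() */
    static bool x_lt_y_lteq_z(int x, int y, int z)
    {
        if (x < z)
            return x < y && y <= z;
        return y > x || y <= z; /* window wraps past 7 */
    }

    /* x <= y < z on the modulo-8 circle, as llc_shdlc_x_lteq_y_lt_z() */
    static bool x_lteq_y_lt_z(int x, int y, int z)
    {
        if (x <= z)
            return x <= y && y < z;
        return y >= x || y < z;
    }

    int main(void)
    {
        /* a received N(R)=y acks pending frames when dnr < y <= ns */
        printf("%d\n", x_lt_y_lteq_z(6, 0, 2)); /* 1: wraps 6..7..0..1..2 */
        printf("%d\n", x_lt_y_lteq_z(6, 5, 2)); /* 0: outside the window  */
        printf("%d\n", x_lteq_y_lt_z(3, 3, 6)); /* 1: REJ at window start */
        return 0;
    }
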
271 static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
275 pr_debug("ns reset to %d\n", shdlc->dnr);
277 while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
279 skb_queue_head(&shdlc->send_q, skb);
281 shdlc->ns = shdlc->dnr;
284 static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
290 if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
291 if (shdlc->t2_active) {
292 del_timer_sync(&shdlc->t2_timer);
293 shdlc->t2_active = false;
297 if (shdlc->dnr != y_nr) {
298 while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
299 skb = skb_dequeue(&shdlc->ack_pending_q);
304 llc_shdlc_requeue_ack_pending(shdlc);
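
llc_shdlc_rcv_rej() implements go-back-N: REJ(y_nr) implicitly acks every in-flight frame older than y_nr (they are freed from ack_pending_q), and llc_shdlc_requeue_ack_pending() pushes the survivors back onto the head of send_q, rewinding ns to dnr so transmission restarts at y_nr. A compact sketch of that accounting; dnr here names the oldest in-flight N(S), and int arrays of N(S) values stand in for the sk_buff queues:

    #include <stdio.h>
    #include <string.h>

    struct link {
        int pending[8], npending;  /* ack_pending_q, oldest first */
        int send[8], nsend;        /* send_q, next to send first  */
        int dnr, ns;
    };

    static void rcv_rej(struct link *l, int y_nr)
    {
        int dropped = ((y_nr - l->dnr) % 8 + 8) % 8;

        /* frames older than y_nr were received fine: free them */
        memmove(l->pending, l->pending + dropped,
                (l->npending - dropped) * sizeof(int));
        l->npending -= dropped;
        l->dnr = y_nr;

        /* requeue the survivors in front of anything not yet sent ... */
        memmove(l->send + l->npending, l->send, l->nsend * sizeof(int));
        memcpy(l->send, l->pending, l->npending * sizeof(int));
        l->nsend += l->npending;
        l->npending = 0;

        /* ... and rewind ns so retransmission restarts at y_nr */
        l->ns = l->dnr;
    }

    int main(void)
    {
        struct link l = {
            .pending = {2, 3}, .npending = 2, /* in flight      */
            .send = {4}, .nsend = 1,          /* not yet sent   */
            .dnr = 2, .ns = 4,
        };

        rcv_rej(&l, 3); /* remote got frame 2, asks for 3 onward */
        printf("ns=%d dnr=%d send_q:", l.ns, l.dnr);
        for (int i = 0; i < l.nsend; i++)
            printf(" %d", l.send[i]);
        printf("\n"); /* ns=3 dnr=3 send_q: 3 4 */
        return 0;
    }
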
309 static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
314 if (shdlc->state != SHDLC_CONNECTED)
319 llc_shdlc_rcv_ack(shdlc, nr);
320 if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */
321 shdlc->rnr = false;
322 if (shdlc->send_q.qlen == 0) {
323 skb = llc_shdlc_alloc_skb(shdlc, 0);
325 skb_queue_tail(&shdlc->send_q, skb);
330 llc_shdlc_rcv_rej(shdlc, nr);
333 llc_shdlc_rcv_ack(shdlc, nr);
334 shdlc->rnr = true;
341 static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
345 del_timer_sync(&shdlc->connect_timer);
348 shdlc->ns = 0;
349 shdlc->nr = 0;
350 shdlc->dnr = 0;
352 shdlc->state = SHDLC_HALF_CONNECTED;
354 shdlc->state = SHDLC_DISCONNECTED;
357 shdlc->connect_result = r;
359 wake_up(shdlc->connect_wq);
362 static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc)
366 skb = llc_shdlc_alloc_skb(shdlc, 2);
373 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
376 static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc)
380 skb = llc_shdlc_alloc_skb(shdlc, 0);
384 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
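
The connect path builds a 2-byte RSET payload (proposed window, SREJ support) and lets llc_shdlc_send_u_frame() prepend the control byte; the UA reply from llc_shdlc_connect_send_ua() is the same frame shape with an empty payload. A sketch of the resulting RSET bytes, with the head and modifier values taken from the kernel source and window 4 / no SREJ mirroring the kernel defaults:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* values from llc_shdlc.c */
    #define SHDLC_CONTROL_HEAD_U 0xe0
    #define U_FRAME_RSET         0x19
    #define MAX_WINDOW           4     /* SHDLC_MAX_WINDOW     */
    #define SREJ_SUPPORT         false /* SHDLC_SREJ_SUPPORT   */

    /* control byte + 2-byte negotiation payload; returns frame length */
    static size_t build_rset(uint8_t *frame)
    {
        frame[0] = SHDLC_CONTROL_HEAD_U | U_FRAME_RSET;
        frame[1] = MAX_WINDOW;
        frame[2] = SREJ_SUPPORT ? 1 : 0;
        return 3;
    }

    int main(void)
    {
        uint8_t frame[3];
        size_t len = build_rset(frame);

        for (size_t i = 0; i < len; i++)
            printf("%02x ", frame[i]);
        printf("\n"); /* f9 04 00 */
        return 0;
    }
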
387 static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
399 switch (shdlc->state) {
415 shdlc->w = w;
416 shdlc->srej_support = srej_support;
417 r = llc_shdlc_connect_send_ua(shdlc);
418 llc_shdlc_connect_complete(shdlc, r);
432 shdlc->hard_fault = -ECONNRESET;
439 if ((shdlc->state == SHDLC_CONNECTING &&
440 shdlc->connect_tries > 0) ||
441 (shdlc->state == SHDLC_NEGOTIATING)) {
442 llc_shdlc_connect_complete(shdlc, 0);
443 shdlc->state = SHDLC_CONNECTED;
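
On the receive side (the w/srej_support lines above), the peer's RSET payload is accepted only when the proposed window fits our maximum and SREJ is not requested unless we can provide it; missing payload bytes fall back to our own defaults. A hedged sketch of that check, with simplified names:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_WINDOW   4     /* our SHDLC_MAX_WINDOW   */
    #define SREJ_SUPPORT false /* our SHDLC_SREJ_SUPPORT */

    /* both payload bytes are optional; defaults apply when absent */
    static bool negotiate(const unsigned char *payload, int len,
                          int *w, bool *srej)
    {
        *w = MAX_WINDOW;
        *srej = SREJ_SUPPORT;
        if (len > 0)
            *w = payload[0];
        if (len > 1)
            *srej = payload[1] & 0x01;

        return *w <= MAX_WINDOW && (SREJ_SUPPORT || !*srej);
    }

    int main(void)
    {
        unsigned char ok[] = {3, 0}, too_big[] = {7, 0};
        int w;
        bool srej;

        printf("accept {3,0}: %d\n", negotiate(ok, 2, &w, &srej));      /* 1 */
        printf("accept {7,0}: %d\n", negotiate(too_big, 2, &w, &srej)); /* 0 */
        return 0;
    }
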
453 static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
462 if (shdlc->rcv_q.qlen)
463 pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
465 while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
471 if (shdlc->state == SHDLC_HALF_CONNECTED)
472 shdlc->state = SHDLC_CONNECTED;
476 llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
479 if (shdlc->state == SHDLC_HALF_CONNECTED)
480 shdlc->state = SHDLC_CONNECTED;
484 llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
489 llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
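
llc_shdlc_handle_rcv_queue() dispatches on the first byte of each frame: the top bits select I/S/U, and the remaining bits carry N(S)/N(R), the S-frame type, or the U-frame modifier, matching the SHDLC_CONTROL_* masks in the source. A standalone decoder sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* masks mirror the SHDLC_CONTROL_* constants in llc_shdlc.c */
    #define HEAD_MASK 0xe0
    #define HEAD_I    0x80
    #define HEAD_I2   0xa0 /* an I-frame whose N(S) sets bit 5 */
    #define HEAD_S    0xc0
    #define HEAD_U    0xe0

    static void decode(uint8_t control)
    {
        switch (control & HEAD_MASK) {
        case HEAD_I:
        case HEAD_I2:
            printf("I-frame ns=%d nr=%d\n",
                   (control >> 3) & 0x07, control & 0x07);
            break;
        case HEAD_S:
            printf("S-frame type=%d nr=%d\n",
                   (control >> 3) & 0x03, control & 0x07);
            break;
        case HEAD_U:
            printf("U-frame modifier=0x%02x\n", control & 0x1f);
            break;
        default:
            printf("invalid control byte\n");
        }
    }

    int main(void)
    {
        decode(0x80 | (2 << 3) | 5); /* I-frame ns=2 nr=5 */
        decode(0xc0 | (1 << 3) | 4); /* S-frame REJ, nr=4 */
        decode(0xe0 | 0x19);         /* U-frame RSET      */
        return 0;
    }
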
512 static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
518 if (shdlc->send_q.qlen)
520 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
521 shdlc->rnr == false ? "false" : "true",
522 shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
523 shdlc->ack_pending_q.qlen);
525 while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
526 (shdlc->rnr == false)) {
528 if (shdlc->t1_active) {
529 del_timer_sync(&shdlc->t1_timer);
530 shdlc->t1_active = false;
534 skb = skb_dequeue(&shdlc->send_q);
536 *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
537 shdlc->nr;
539 pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
540 shdlc->nr);
541 SHDLC_DUMP_SKB("shdlc frame written", skb);
543 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
545 shdlc->hard_fault = r;
549 shdlc->ns = (shdlc->ns + 1) % 8;
554 skb_queue_tail(&shdlc->ack_pending_q, skb);
556 if (shdlc->t2_active == false) {
557 shdlc->t2_active = true;
558 mod_timer(&shdlc->t2_timer, time_sent +
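
The send loop drains send_q only while the window has room (ack_pending_q.qlen < w) and the peer has not signalled RNR; each I-frame gets the control byte shown at line 536 (N(S) in bits 5-3, piggybacked N(R) in bits 2-0) and then parks on ack_pending_q until acked. A sketch with counters standing in for the queues:

    #include <stdbool.h>
    #include <stdio.h>

    #define W 4 /* negotiated window, at most SHDLC_MAX_WINDOW */

    struct tx {
        int nsend;    /* frames waiting in send_q    */
        int npending; /* frames in ack_pending_q     */
        int ns, nr;
        bool rnr;     /* peer said receiver-not-ready */
    };

    /* sketch of the loop in llc_shdlc_handle_send_queue() */
    static void handle_send_queue(struct tx *t)
    {
        while (t->nsend && t->npending < W && !t->rnr) {
            unsigned char control = 0x80 | (t->ns << 3) | t->nr;

            printf("send I-frame ns=%d nr=%d (control 0x%02x)\n",
                   t->ns, t->nr, control);
            t->ns = (t->ns + 1) % 8; /* next sequence number */
            t->nsend--;
            t->npending++;           /* held until the peer acks it */
        }
    }

    int main(void)
    {
        struct tx t = { .nsend = 6, .npending = 0, .ns = 0, .nr = 3 };

        handle_send_queue(&t); /* stops with W=4 frames in flight */
        printf("left in send_q: %d, awaiting ack: %d\n",
               t.nsend, t.npending); /* 2, 4 */
        return 0;
    }
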
567 struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);
569 schedule_work(&shdlc->sm_work);
574 struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);
578 schedule_work(&shdlc->sm_work);
583 struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);
587 schedule_work(&shdlc->sm_work);
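
All three timer callbacks recover their struct llc_shdlc from the embedded timer with from_timer() (a container_of() wrapper) and then just kick sm_work, so every state change stays inside the single worker. A userspace restatement of that pattern, with a simplified struct:

    #include <stddef.h>
    #include <stdio.h>

    /* the offset arithmetic behind from_timer()/container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int pad; };

    struct llc_shdlc_sketch {
        int state;
        struct timer t1_timer; /* the member the callback receives */
    };

    static void t1_timeout(struct timer *t)
    {
        struct llc_shdlc_sketch *shdlc =
            container_of(t, struct llc_shdlc_sketch, t1_timer);

        /* the real callbacks only do: schedule_work(&shdlc->sm_work) */
        printf("timeout for shdlc in state %d\n", shdlc->state);
    }

    int main(void)
    {
        struct llc_shdlc_sketch s = { .state = 3 };

        t1_timeout(&s.t1_timer); /* prints state 3 */
        return 0;
    }
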
592 struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
595 mutex_lock(&shdlc->state_mutex);
597 switch (shdlc->state) {
599 skb_queue_purge(&shdlc->rcv_q);
600 skb_queue_purge(&shdlc->send_q);
601 skb_queue_purge(&shdlc->ack_pending_q);
604 if (shdlc->hard_fault) {
605 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
609 if (shdlc->connect_tries++ < 5)
610 r = llc_shdlc_connect_initiate(shdlc);
614 llc_shdlc_connect_complete(shdlc, r);
616 mod_timer(&shdlc->connect_timer, jiffies +
619 shdlc->state = SHDLC_NEGOTIATING;
623 if (timer_pending(&shdlc->connect_timer) == 0) {
624 shdlc->state = SHDLC_CONNECTING;
625 schedule_work(&shdlc->sm_work);
628 llc_shdlc_handle_rcv_queue(shdlc);
630 if (shdlc->hard_fault) {
631 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
637 llc_shdlc_handle_rcv_queue(shdlc);
638 llc_shdlc_handle_send_queue(shdlc);
640 if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
643 shdlc->t1_active = false;
644 r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
645 shdlc->nr);
647 shdlc->hard_fault = r;
650 if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
653 shdlc->t2_active = false;
655 llc_shdlc_requeue_ack_pending(shdlc);
656 llc_shdlc_handle_send_queue(shdlc);
659 if (shdlc->hard_fault)
660 shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
665 mutex_unlock(&shdlc->state_mutex);
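
llc_shdlc_sm_work() is the only place the state is touched: the receive path, the send path and all three timers merely schedule it, and it serializes everything under state_mutex. A skeleton of its dispatch; the printed strings summarize what each branch in the fragment above does:

    #include <stdio.h>

    /* same ordering as the kernel's shdlc_state enum */
    enum shdlc_state {
        SHDLC_DISCONNECTED,
        SHDLC_CONNECTING,
        SHDLC_NEGOTIATING,
        SHDLC_HALF_CONNECTED,
        SHDLC_CONNECTED,
    };

    static void sm_work(enum shdlc_state state)
    {
        /* mutex_lock(&shdlc->state_mutex) in the kernel */
        switch (state) {
        case SHDLC_DISCONNECTED:
            printf("purge rcv/send/ack-pending queues\n");
            break;
        case SHDLC_CONNECTING:
            printf("send RSET, arm connect timer, -> NEGOTIATING\n");
            break;
        case SHDLC_NEGOTIATING:
            printf("retry on connect timeout, then process rcv queue\n");
            break;
        case SHDLC_HALF_CONNECTED:
        case SHDLC_CONNECTED:
            printf("process rcv queue, send queue, T1/T2 expiry\n");
            break;
        }
        /* mutex_unlock(&shdlc->state_mutex) */
    }

    int main(void)
    {
        sm_work(SHDLC_CONNECTED);
        return 0;
    }
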
669 * Called from syscall context to establish shdlc link. Sleeps until
672 static int llc_shdlc_connect(struct llc_shdlc *shdlc)
676 mutex_lock(&shdlc->state_mutex);
678 shdlc->state = SHDLC_CONNECTING;
679 shdlc->connect_wq = &connect_wq;
680 shdlc->connect_tries = 0;
681 shdlc->connect_result = 1;
683 mutex_unlock(&shdlc->state_mutex);
685 schedule_work(&shdlc->sm_work);
687 wait_event(connect_wq, shdlc->connect_result != 1);
689 return shdlc->connect_result;
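
llc_shdlc_connect() seeds connect_result with 1, a value no real outcome uses (success is 0, failures are negative errnos), schedules the state machine, and sleeps in wait_event() until llc_shdlc_connect_complete() publishes a result and wakes it. A userspace analogue of that handshake with a pthread condition variable (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int connect_result = 1; /* sentinel: still connecting */

    static void *state_machine(void *arg)
    {
        (void)arg;
        usleep(10000);              /* pretend the UA frame just arrived */
        pthread_mutex_lock(&lock);
        connect_result = 0;         /* llc_shdlc_connect_complete(shdlc, 0) */
        pthread_cond_broadcast(&cond); /* wake_up(shdlc->connect_wq) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, state_machine, NULL);
        pthread_mutex_lock(&lock);
        while (connect_result == 1) /* wait_event(..., result != 1) */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("connect result: %d\n", connect_result);
        return 0;
    }
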
692 static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
694 mutex_lock(&shdlc->state_mutex);
696 shdlc->state = SHDLC_DISCONNECTED;
698 mutex_unlock(&shdlc->state_mutex);
700 schedule_work(&shdlc->sm_work);
704 * Receive an incoming shdlc frame. Frame has already been crc-validated.
708 static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
712 shdlc->hard_fault = -EREMOTEIO;
715 skb_queue_tail(&shdlc->rcv_q, skb);
718 schedule_work(&shdlc->sm_work);
726 struct llc_shdlc *shdlc;
731 shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
732 if (shdlc == NULL)
735 mutex_init(&shdlc->state_mutex);
736 shdlc->state = SHDLC_DISCONNECTED;
738 timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0);
739 timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0);
740 timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0);
742 shdlc->w = SHDLC_MAX_WINDOW;
743 shdlc->srej_support = SHDLC_SREJ_SUPPORT;
745 skb_queue_head_init(&shdlc->rcv_q);
746 skb_queue_head_init(&shdlc->send_q);
747 skb_queue_head_init(&shdlc->ack_pending_q);
749 INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
751 shdlc->hdev = hdev;
752 shdlc->xmit_to_drv = xmit_to_drv;
753 shdlc->rcv_to_hci = rcv_to_hci;
754 shdlc->tx_headroom = tx_headroom;
755 shdlc->tx_tailroom = tx_tailroom;
756 shdlc->llc_failure = llc_failure;
758 return shdlc;
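
The init fragment (llc_shdlc_init() in the kernel source) wires everything up before the first frame can arrive: the mutex, the three timers, the queues and the worker, then the driver callbacks and headroom values that llc_shdlc_alloc_skb() and the transmit path use later. A simplified sketch of that callback wiring; the types are illustrative stand-ins for the kernel signatures:

    #include <stdio.h>
    #include <stdlib.h>

    struct shdlc {
        void *hdev;
        int (*xmit_to_drv)(void *hdev, const char *frame);
        int tx_headroom, tx_tailroom;
    };

    static int dummy_xmit(void *hdev, const char *frame)
    {
        (void)hdev;
        printf("driver sends: %s\n", frame);
        return 0;
    }

    /* store the callbacks once so later paths need no driver knowledge */
    static struct shdlc *shdlc_register(void *hdev,
                                        int (*xmit)(void *, const char *),
                                        int headroom, int tailroom)
    {
        struct shdlc *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        s->hdev = hdev;
        s->xmit_to_drv = xmit;
        s->tx_headroom = headroom;
        s->tx_tailroom = tailroom;
        return s;
    }

    int main(void)
    {
        struct shdlc *s = shdlc_register(NULL, dummy_xmit, 4, 2);

        if (!s)
            return 1;
        s->xmit_to_drv(s->hdev, "RSET");
        free(s);
        return 0;
    }
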
763 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
765 skb_queue_purge(&shdlc->rcv_q);
766 skb_queue_purge(&shdlc->send_q);
767 skb_queue_purge(&shdlc->ack_pending_q);
769 kfree(shdlc);
774 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
776 return llc_shdlc_connect(shdlc);
781 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
783 llc_shdlc_disconnect(shdlc);
790 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
792 llc_shdlc_recv_frame(shdlc, skb);
797 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
799 skb_queue_tail(&shdlc->send_q, skb);
801 schedule_work(&shdlc->sm_work);
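
The trailing wrappers adapt struct llc_shdlc to the generic LLC interface: each fetches the instance with nfc_llc_get_data() and forwards to the matching llc_shdlc_* function, so the HCI core can drive any LLC through one ops table. A minimal sketch of that vtable pattern, with simplified signatures:

    #include <stdio.h>

    struct llc_ops {
        int (*start)(void *llc_data);
        void (*stop)(void *llc_data);
    };

    static int shdlc_start(void *llc_data)
    {
        printf("llc_shdlc_connect(%p)\n", llc_data);
        return 0;
    }

    static void shdlc_stop(void *llc_data)
    {
        printf("llc_shdlc_disconnect(%p)\n", llc_data);
    }

    /* analogous to the kernel's llc_shdlc_ops table */
    static const struct llc_ops shdlc_ops = {
        .start = shdlc_start,
        .stop = shdlc_stop,
    };

    int main(void)
    {
        int data = 0; /* stands in for the struct llc_shdlc instance */

        shdlc_ops.start(&data);
        shdlc_ops.stop(&data);
        return 0;
    }
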