Lines matching references to hd (the struct gb_host_device pointer)

78  * @hd: pointer to our gb_host_device structure
103 struct gb_host_device *hd;
136 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
138 return (struct es2_ap_dev *)&hd->hd_priv;
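
The cast in hd_to_es2() works because the greybus core allocates struct gb_host_device with extra trailing storage (hd_priv) for the host driver's private state, and the driver simply casts the address of that trailing area back to its own type. A minimal, self-contained model of the pattern, using made-up host_device/es2_like_dev types rather than the real greybus ones:

    #include <stdlib.h>

    /* Illustrative core structure with trailing per-driver storage. */
    struct host_device {
        unsigned int num_cports;
        unsigned long priv[];       /* driver-private area, sized at allocation */
    };

    /* Illustrative driver-private structure, standing in for es2_ap_dev. */
    struct es2_like_dev {
        struct host_device *hd;     /* back-pointer, like es2->hd = hd above */
        int cport_count;
    };

    /* Allocate the core structure plus priv_size bytes of private storage. */
    static struct host_device *host_device_alloc(size_t priv_size)
    {
        return calloc(1, sizeof(struct host_device) + priv_size);
    }

    /* Equivalent of hd_to_es2(): the private area sits right behind the core. */
    static struct es2_like_dev *hd_to_priv(struct host_device *hd)
    {
        return (struct es2_like_dev *)&hd->priv;
    }

    int main(void)
    {
        struct host_device *hd = host_device_alloc(sizeof(struct es2_like_dev));
        struct es2_like_dev *es2;

        if (!hd)
            return 1;

        es2 = hd_to_priv(hd);
        es2->hd = hd;
        es2->cport_count = 16;

        free(hd);
        return 0;
    }

In the real driver the size of the trailing area is declared once by the host driver up front, so hd_to_es2() never needs a separate allocation or lookup.
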
217 static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
220 struct es2_ap_dev *es2 = hd_to_es2(hd);
390 static int message_send(struct gb_host_device *hd, u16 cport_id,
393 struct es2_ap_dev *es2 = hd_to_es2(hd);
405 if (!cport_id_valid(hd, cport_id)) {
455 struct gb_host_device *hd = message->operation->connection->hd;
456 struct es2_ap_dev *es2 = hd_to_es2(hd);
488 static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
491 struct es2_ap_dev *es2 = hd_to_es2(hd);
492 struct ida *id_map = &hd->cport_id_map;
498 dev_err(&hd->dev, "cport %d not available\n", cport_id);
505 dev_err(&hd->dev, "CDSI1 already in use\n");
516 ida_end = hd->num_cports - 1;
517 } else if (cport_id < hd->num_cports) {
521 dev_err(&hd->dev, "cport %d not available\n", cport_id);
528 static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
530 struct es2_ap_dev *es2 = hd_to_es2(hd);
538 ida_free(&hd->cport_id_map, cport_id);
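
es2_cport_allocate() and es2_cport_release() hand out CPort ids from hd->cport_id_map, which is a struct ida, either honouring a specific requested id or picking any free id below hd->num_cports. A hedged kernel-style sketch of that pattern (the helper name is invented, the reserved-CDSI special cases are omitted, and the real driver may use a different ida call):

    #include <linux/idr.h>
    #include <linux/device.h>
    /* assumes the greybus declarations used elsewhere in this file are in scope */

    static int cport_id_alloc_sketch(struct gb_host_device *hd, int cport_id)
    {
        struct ida *id_map = &hd->cport_id_map;
        int ida_start, ida_end;

        if (cport_id < 0) {
            /* caller does not care: any free CPort will do */
            ida_start = 0;
            ida_end = hd->num_cports - 1;
        } else if (cport_id < hd->num_cports) {
            /* caller wants exactly this CPort */
            ida_start = cport_id;
            ida_end = cport_id;
        } else {
            dev_err(&hd->dev, "cport %d not available\n", cport_id);
            return -EINVAL;
        }

        /* returns the allocated id, or a negative errno if the range is full */
        return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
    }

Release is the mirror image: ida_free(&hd->cport_id_map, cport_id), as the listing shows, once the driver has torn down any per-CPort state in es2.
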
541 static int cport_enable(struct gb_host_device *hd, u16 cport_id,
544 struct es2_ap_dev *es2 = hd_to_es2(hd);
562 dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
583 static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
585 struct es2_ap_dev *es2 = hd_to_es2(hd);
602 static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
604 struct es2_ap_dev *es2 = hd_to_es2(hd);
620 static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
623 struct es2_ap_dev *es2 = hd_to_es2(hd);
646 static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
649 struct es2_ap_dev *es2 = hd_to_es2(hd);
675 static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
677 struct es2_ap_dev *es2 = hd_to_es2(hd);
693 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
696 struct es2_ap_dev *es2 = hd_to_es2(hd);
711 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
714 struct es2_ap_dev *es2 = hd_to_es2(hd);
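
Taken together, the functions matched above (output(), message_send(), the es2_cport_*() operations, and the latency-tag pair) are the per-CPort and per-message hooks that greybus core invokes through a single host-driver descriptor, the es2_driver later handed to gb_hd_create(). A sketch of how such a descriptor is filled in; the field names below are inferred from the function names in this listing and the gb_hd_driver convention, so check them against struct gb_hd_driver in the tree you are reading:

    /* Sketch only: the in-tree es2_driver definition is authoritative. */
    static struct gb_hd_driver es2_driver_sketch = {
        .hd_priv_size        = sizeof(struct es2_ap_dev),  /* sizes hd->hd_priv */
        .message_send        = message_send,
        .cport_allocate      = es2_cport_allocate,
        .cport_release       = es2_cport_release,
        .cport_enable        = cport_enable,
        .cport_connected     = es2_cport_connected,
        .cport_flush         = es2_cport_flush,
        .cport_shutdown      = es2_cport_shutdown,
        .cport_quiesce       = es2_cport_quiesce,
        .cport_clear         = es2_cport_clear,
        .latency_tag_enable  = latency_tag_enable,
        .latency_tag_disable = latency_tag_disable,
        .output              = output,
    };
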
804 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
805 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
808 gb_hd_put(es2->hd);
815 struct gb_host_device *hd = urb->context;
843 if (cport_id_valid(hd, cport_id)) {
844 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
859 struct gb_host_device *hd = message->operation->connection->hd;
860 struct es2_ap_dev *es2 = hd_to_es2(hd);
874 greybus_message_sent(hd, message, status);
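
Both completion handlers recover their context from the URB: the in-direction callback gets the host device directly (cport_in_callback is registered with hd as the URB context, as the call near the bottom of the listing shows), while the out-direction path reaches hd through message->operation->connection->hd and reports completion with greybus_message_sent(). A hedged sketch of the in-direction wiring, with placeholder buffer handling (the real driver parses the CPort id out of the message header before stripping it):

    #include <linux/usb.h>
    /* assumes the greybus declarations used elsewhere in this file are in scope */

    static void in_callback_sketch(struct urb *urb)
    {
        struct gb_host_device *hd = urb->context;
        u16 cport_id = 0;   /* placeholder: really parsed from the header */

        if (urb->status)
            return;

        if (cport_id_valid(hd, cport_id))
            greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
                              urb->actual_length);

        /* resubmit so the IN endpoint keeps listening */
        usb_submit_urb(urb, GFP_ATOMIC);
    }

    static int submit_in_urb_sketch(struct gb_host_device *hd,
                                    struct usb_device *udev, struct urb *urb,
                                    void *buf, size_t len, unsigned int pipe)
    {
        /* hd becomes urb->context, exactly as "cport_in_callback, hd" above */
        usb_fill_bulk_urb(urb, udev, pipe, buf, len, in_callback_sketch, hd);
        return usb_submit_urb(urb, GFP_KERNEL);
    }
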
1248 struct gb_host_device *hd;
1270 hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1272 if (IS_ERR(hd)) {
1274 return PTR_ERR(hd);
1277 es2 = hd_to_es2(hd);
1278 es2->hd = hd;
1289 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1292 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
1358 cport_in_callback, hd);
1417 retval = gb_hd_add(hd);
1428 gb_hd_del(hd);
1441 gb_hd_del(es2->hd);
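
The remaining matches trace the lifecycle: gb_hd_create() and hd_to_es2() in probe, gb_hd_cport_reserve() for the two CDSI CPorts, gb_hd_add() once the transport is ready, and gb_hd_del(), gb_hd_cport_release_reserved() and gb_hd_put() on disconnect or on a failed probe. A hedged sketch of that ordering, with everything between the greybus calls (URB setup, endpoint lookup) elided; the real driver funnels the error path through a common teardown helper rather than the flat goto ladder shown here, and the CPort count constant is a placeholder:

    static int es2_probe_sketch(struct usb_interface *interface,
                                const struct usb_device_id *id)
    {
        struct usb_device *udev = interface_to_usbdev(interface);
        struct gb_host_device *hd;
        struct es2_ap_dev *es2;
        int retval;

        hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
                          ES2_NUM_CPORTS /* placeholder name */);
        if (IS_ERR(hd))
            return PTR_ERR(hd);

        es2 = hd_to_es2(hd);
        es2->hd = hd;                   /* back-pointer used throughout */

        retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
        if (retval)
            goto err_put_hd;
        retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
        if (retval)
            goto err_release_cdsi0;

        /* ... allocate buffers and submit cport_in URBs with hd as context ... */

        retval = gb_hd_add(hd);
        if (retval)
            goto err_release_cdsi1;

        return 0;

    err_release_cdsi1:
        gb_hd_cport_release_reserved(hd, ES2_CPORT_CDSI1);
    err_release_cdsi0:
        gb_hd_cport_release_reserved(hd, ES2_CPORT_CDSI0);
    err_put_hd:
        gb_hd_put(hd);
        return retval;
    }

Disconnect reverses the same steps: gb_hd_del(es2->hd) first, then the reserved CDSI CPorts are released and the last reference is dropped with gb_hd_put(es2->hd).
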