Lines Matching refs:packet
in /barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/core/

54 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
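
Line 54's MODULE_DESCRIPTION places this file as the userspace MAD access driver (user_mad.c in the Linux tree this Barrelfish mlx4 port derives from). The packet->mad.hdr fields referenced throughout are the userspace-visible MAD header. For orientation, the layout below is a sketch reconstructed from Linux's <rdma/ib_user_mad.h>, not copied from this tree, so treat field order and widths as approximate:

    struct ib_user_mad_hdr {
            __u32   id;             /* agent slot; stamped by queue_packet() below */
            __u32   status;         /* 0, or ETIMEDOUT for an expired send */
            __u32   timeout_ms;
            __u32   retries;
            __u32   length;
            __be32  qpn;
            __be32  qkey;
            __be16  lid;
            __u8    sl;
            __u8    path_bits;
            __u8    grh_present;
            __u8    gid_index;
            __u8    hop_limit;
            __u8    traffic_class;
            __u8    gid[16];
            __be32  flow_label;
            __u16   pkey_index;
            __u8    reserved[6];
    };

    struct ib_user_mad {
            struct ib_user_mad_hdr hdr;
            __u64   data[0];        /* MAD payload follows the header */
    };
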
166 struct ib_umad_packet *packet)
172 for (packet->mad.hdr.id = 0;
173 packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
174 packet->mad.hdr.id++)
175 if (agent == __get_agent(file, packet->mad.hdr.id)) {
176 list_add_tail(&packet->list, &file->recv_list);
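
Lines 166-176 above appear to be queue_packet(): it scans the file's agent slots for the agent the MAD belongs to, records that slot in mad.hdr.id, and appends the packet to recv_list, where ib_umad_read() (line 336 onward) will pick it up.
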
189 struct ib_umad_packet *packet)
192 list_del(&packet->list);
200 struct ib_umad_packet *packet = send_wc->send_buf->context[0];
202 dequeue_send(file, packet);
203 ib_destroy_ah(packet->msg->ah);
204 ib_free_send_mad(packet->msg);
207 packet->length = IB_MGMT_MAD_HDR;
208 packet->mad.hdr.status = ETIMEDOUT;
209 if (!queue_packet(file, agent, packet))
212 kfree(packet);
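
Lines 189-212 look like dequeue_send() and the send-completion callback send_handler(): a completed send is unlinked from the file's send list and its address handle and send buffer freed. If the completion was a timeout, the packet is truncated to the MAD header, its status set to ETIMEDOUT, and it is requeued via queue_packet() so userspace learns of the failure; otherwise it is simply kfree()d.
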
219 struct ib_umad_packet *packet;
224 packet = kzalloc(sizeof *packet, GFP_KERNEL);
225 if (!packet)
228 packet->length = mad_recv_wc->mad_len;
229 packet->recv_wc = mad_recv_wc;
231 packet->mad.hdr.status = 0;
232 packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
233 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
234 packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
235 packet->mad.hdr.sl = mad_recv_wc->wc->sl;
236 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
237 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
238 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
239 if (packet->mad.hdr.grh_present) {
246 packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
247 packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
248 packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
249 memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
250 packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
253 if (queue_packet(file, agent, packet))
258 kfree(packet);
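
Lines 219-258 are the receive path, recv_handler(): a fresh packet is kzalloc()ed and its userspace header filled in from the work completion (source QP, LID, SL, path bits, P_Key index). If a GRH accompanied the MAD, the GID, hop limit, traffic class, and flow label are copied out of the resolved address-handle attributes as well. The packet is then handed to queue_packet(), and freed on failure.
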
264 struct ib_umad_packet *packet, size_t count)
270 recv_buf = &packet->recv_wc->recv_buf;
271 if ((packet->length <= sizeof (*recv_buf->mad) &&
272 count < hdr_size(file) + packet->length) ||
273 (packet->length > sizeof (*recv_buf->mad) &&
277 if (copy_to_user(buf, &packet->mad, hdr_size(file)))
281 seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
285 if (seg_payload < packet->length) {
290 if (count < hdr_size(file) + packet->length) {
300 for (left = packet->length - seg_payload, buf += seg_payload;
310 return hdr_size(file) + packet->length;
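
Lines 264-310 appear to be copy_recv_mad(), which copies a received MAD out to the user buffer: the userspace header first, then the first segment (at most one MAD's worth), and, for RMPP transfers longer than a single MAD, the remaining segments in a loop. The length checks up front reject buffers too small for the data actually present.
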
314 struct ib_umad_packet *packet, size_t count)
316 ssize_t size = hdr_size(file) + packet->length;
321 if (copy_to_user(buf, &packet->mad, hdr_size(file)))
326 if (copy_to_user(buf, packet->mad.data, packet->length))
336 struct ib_umad_packet *packet;
357 packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
358 list_del(&packet->list);
362 if (packet->recv_wc)
363 ret = copy_recv_mad(file, buf, packet, count);
365 ret = copy_send_mad(file, buf, packet, count);
368 /* Requeue packet */
370 list_add(&packet->list, &file->recv_list);
373 if (packet->recv_wc)
374 ib_free_recv_mad(packet->recv_wc);
375 kfree(packet);
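
Lines 314-375 cover copy_send_mad() and the read() entry point ib_umad_read(). read() pops the oldest packet off recv_list; packets carrying a recv_wc arrived off the wire and go through copy_recv_mad(), while those without one are locally generated timeout notifications and take the simpler copy_send_mad() path. Note the error handling around lines 368-370: if the copy fails, the packet is requeued rather than lost; on success the receive work completion and the packet itself are freed.
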
413 struct ib_umad_packet *packet)
418 hdr = (struct ib_mad_hdr *) packet->mad.data;
438 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
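
Lines 413-438 look like is_duplicate(): before posting a new request, the driver walks the pending send list and compares the new MAD's transaction (TID and management class) and destination (same_destination()) against each queued send, so a userspace retransmission cannot create two outstanding sends sharing one TID.
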
449 struct ib_umad_packet *packet;
460 packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
461 if (!packet)
464 if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
469 if (packet->mad.hdr.id < 0 ||
470 packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
477 if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
484 agent = __get_agent(file, packet->mad.hdr.id);
491 ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid);
492 ah_attr.sl = packet->mad.hdr.sl;
493 ah_attr.src_path_bits = packet->mad.hdr.path_bits;
495 if (packet->mad.hdr.grh_present) {
497 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
498 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
499 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
500 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
501 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
510 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
522 packet->msg = ib_create_send_mad(agent,
523 be32_to_cpu(packet->mad.hdr.qpn),
524 packet->mad.hdr.pkey_index, rmpp_active,
526 if (IS_ERR(packet->msg)) {
527 ret = PTR_ERR(packet->msg);
531 packet->msg->ah = ah;
532 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
533 packet->msg->retries = packet->mad.hdr.retries;
534 packet->msg->context[0] = packet;
537 memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
540 if (copy_from_user(packet->msg->mad + copy_offset,
547 ret = copy_rmpp_mad(packet->msg, buf);
557 if (!ib_response_mad(packet->msg->mad)) {
558 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
565 ret = is_duplicate(file, packet);
567 list_add_tail(&packet->list, &file->send_list);
574 ret = ib_post_send_mad(packet->msg, NULL);
582 dequeue_send(file, packet);
584 ib_free_send_mad(packet->msg);
590 kfree(packet);
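
Lines 449-590 are the bulk of the write() path, ib_umad_write(): the userspace header is copied in and the agent id validated; an address handle is built from the header fields (mirroring what recv_handler() extracted, GRH fields included); ib_create_send_mad() allocates the send buffer, with rmpp_active chosen from the RMPP header examined at line 510; the payload is copied from user space; for requests (non-response MADs) a fresh TID is assigned and is_duplicate() consulted before the packet joins send_list; finally ib_post_send_mad() hands it to the MAD layer. The tail (lines 582-590) is the unwind path that dequeues and frees everything on error.
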
842 struct ib_umad_packet *packet, *tmp;
852 list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
853 if (packet->recv_wc)
854 ib_free_recv_mad(packet->recv_wc);
855 kfree(packet);
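
Lines 842-855 are most likely the file-release cleanup (ib_umad_close()): any packets still sitting on recv_list when the descriptor is closed are walked with list_for_each_entry_safe() and freed, releasing the receive work completion where one exists.

For orientation, here is a minimal, hypothetical userspace sketch of the other side of this interface: reading one MAD through the character device that ib_umad_read() backs. The device path is illustrative, and a real client must first register an agent (the IB_USER_MAD_REGISTER_AGENT ioctl) before any MAD will be queued for it; that step is omitted here.

    #include <rdma/ib_user_mad.h>   /* struct ib_user_mad (Linux uapi header) */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Room for the userspace header plus one 256-byte MAD. */
            uint8_t buf[sizeof(struct ib_user_mad) + 256];
            struct ib_user_mad *umad = (struct ib_user_mad *) buf;

            int fd = open("/dev/infiniband/umad0", O_RDWR); /* path is illustrative */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* This read() lands in ib_umad_read() above: the driver copies
             * the header, then the MAD payload, into our buffer. */
            ssize_t n = read(fd, buf, sizeof buf);
            if (n < 0) {
                    perror("read");
                    close(fd);
                    return 1;
            }

            printf("got %zd bytes, status=%u, agent id=%u\n",
                   n, umad->hdr.status, umad->hdr.id);
            close(fd);
            return 0;
    }
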