Lines matching refs: ic

48 void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
53 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
65 sge->addr = ic->i_recv_hdrs_dma[i];
67 sge->lkey = ic->i_pd->local_dma_lkey;
72 sge->lkey = ic->i_pd->local_dma_lkey;
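
The rds_ib_recv_init_ring() fragments above show each receive slot carrying two scatter/gather entries: the first points at that slot's preallocated RDS header (ic->i_recv_hdrs_dma[i]), the second at the data fragment, and both use the protection domain's local_dma_lkey. A minimal userspace sketch of that per-slot layout, assuming a 48-byte header and page-sized fragments; all demo_* names are hypothetical stand-ins, not the kernel API:

#include <stdint.h>
#include <string.h>

#define DEMO_RING_SIZE	8
#define DEMO_HDR_SIZE	48	/* assumption: sizeof(struct rds_header) */
#define DEMO_FRAG_SIZE	4096	/* assumption: RDS_FRAG_SIZE is one page */

/* Userspace stand-in for struct ib_sge. */
struct demo_sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

/* Each ring slot posts one receive with two scatter entries: entry 0 lands
 * the RDS header, entry 1 lands the data fragment; both use the protection
 * domain's local DMA lkey (cf. ic->i_pd->local_dma_lkey). */
struct demo_recv {
	struct demo_sge sge[2];
};

static void demo_recv_init_ring(struct demo_recv *recvs,
				const uint64_t *hdr_dma, uint32_t lkey)
{
	unsigned int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		struct demo_recv *recv = &recvs[i];

		memset(recv, 0, sizeof(*recv));
		recv->sge[0].addr = hdr_dma[i];		/* cf. ic->i_recv_hdrs_dma[i] */
		recv->sge[0].length = DEMO_HDR_SIZE;
		recv->sge[0].lkey = lkey;
		recv->sge[1].addr = 0;			/* data page is mapped later, at refill time */
		recv->sge[1].length = DEMO_FRAG_SIZE;
		recv->sge[1].lkey = lkey;
	}
}
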
122 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
126 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
128 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
130 free_percpu(ic->i_cache_incs.percpu);
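
rds_ib_recv_alloc_caches() allocates the incoming-message cache first and the fragment cache second; if the second allocation fails, the first one's per-CPU storage is released before returning. A minimal userspace sketch of that allocate-then-unwind shape (demo_* names are hypothetical, and plain calloc()/free() stand in for alloc_percpu()/free_percpu()):

#include <errno.h>
#include <stdlib.h>

struct demo_cache {
	void *percpu;			/* stands in for the alloc_percpu() region */
};

static int demo_alloc_cache(struct demo_cache *cache)
{
	cache->percpu = calloc(64, 64);	/* pretend per-CPU storage */
	return cache->percpu ? 0 : -ENOMEM;
}

/* Mirrors the shape of rds_ib_recv_alloc_caches(): if the second allocation
 * fails, the first one is rolled back so nothing leaks. */
static int demo_alloc_caches(struct demo_cache *incs, struct demo_cache *frags)
{
	int ret;

	ret = demo_alloc_cache(incs);
	if (ret)
		return ret;
	ret = demo_alloc_cache(frags);
	if (ret) {
		free(incs->percpu);	/* cf. free_percpu(ic->i_cache_incs.percpu) */
		incs->percpu = NULL;
	}
	return ret;
}
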
156 void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
164 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
165 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
166 free_percpu(ic->i_cache_incs.percpu);
175 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
176 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
177 free_percpu(ic->i_cache_frags.percpu);
193 static void rds_ib_frag_free(struct rds_ib_connection *ic,
198 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
199 atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
209 struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
216 rds_ib_frag_free(ic, frag);
221 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
224 static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
232 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
233 rds_ib_frag_free(ic, recv->r_frag);
238 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
242 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
243 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
246 static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
253 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
271 rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
276 static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
283 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
286 atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
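
Taken together, the rds_ib_frag_free() and rds_ib_refill_one_frag() fragments show the cache bookkeeping: a freed fragment is parked on the per-connection cache and ic->i_cache_allocs grows by RDS_FRAG_SIZE / SZ_1K, i.e. the counter is kept in kilobytes; a refill that hits the cache subtracts the same amount, and only a cache miss allocates a fresh fragment. A hedged userspace sketch of that get-from-cache-or-allocate pattern, using C11 atomics and a plain list in place of the kernel's atomic_t and per-CPU lists (demo_* names are hypothetical):

#include <stdatomic.h>
#include <stdlib.h>

#define DEMO_FRAG_SIZE	4096	/* assumption: RDS_FRAG_SIZE is one page */
#define DEMO_SZ_1K	1024

struct demo_frag {
	struct demo_frag *next;
	char data[DEMO_FRAG_SIZE];
};

static struct demo_frag *demo_cache_head;	/* single-threaded stand-in for the per-CPU cache */
static atomic_int demo_cache_allocs;		/* cached fragment memory, accounted in KiB */

/* Free path: park the fragment on the cache and grow the KiB counter, as
 * rds_ib_frag_free() does with ic->i_cache_allocs. */
static void demo_frag_free(struct demo_frag *frag)
{
	frag->next = demo_cache_head;
	demo_cache_head = frag;
	atomic_fetch_add(&demo_cache_allocs, DEMO_FRAG_SIZE / DEMO_SZ_1K);
}

/* Refill path: prefer a cached fragment (and shrink the counter); fall back
 * to a fresh allocation only on a cache miss, as rds_ib_refill_one_frag() does. */
static struct demo_frag *demo_refill_one_frag(void)
{
	struct demo_frag *frag = demo_cache_head;

	if (frag) {
		demo_cache_head = frag->next;
		atomic_fetch_sub(&demo_cache_allocs, DEMO_FRAG_SIZE / DEMO_SZ_1K);
		return frag;
	}
	return malloc(sizeof(*frag));
}
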
311 struct rds_ib_connection *ic = conn->c_transport_data;
322 if (!ic->i_cache_incs.ready)
323 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
324 if (!ic->i_cache_frags.ready)
325 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
332 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
338 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
342 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
347 sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
385 struct rds_ib_connection *ic = conn->c_transport_data;
401 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
402 if (pos >= ic->i_recv_ring.w_nr) {
408 recv = &ic->i_recvs[pos];
420 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
438 if (ic->i_flowctl && posted)
442 rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
458 (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
459 rds_ib_ring_empty(&ic->i_recv_ring))) {
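
rds_ib_recv_refill() loops over the ring: reserve one slot with rds_ib_ring_alloc(), populate it from the caches, post it with ib_post_recv(), and if a failed iteration left a slot reserved, hand it back with rds_ib_ring_unalloc(); the tail of the function also checks the flow-control flag and the ring-low/ring-empty conditions visible above to decide whether more work is needed. A minimal userspace analogue of just the reserve/post/unwind loop (the ring and post functions below are hypothetical stand-ins, not the kernel API; the demo post never fails, so the unalloc branch is there only to show the shape):

#include <stdbool.h>

#define DEMO_RING_SIZE	8

struct demo_ring {
	unsigned int alloc_ctr;		/* slots handed out */
	unsigned int free_ctr;		/* slots completed and returned */
};

/* Reserve one slot if the ring is not full and report its position. */
static bool demo_ring_alloc(struct demo_ring *ring, unsigned int *pos)
{
	if (ring->alloc_ctr - ring->free_ctr >= DEMO_RING_SIZE)
		return false;
	*pos = ring->alloc_ctr++ % DEMO_RING_SIZE;
	return true;
}

/* Hand a reserved-but-unposted slot back, as rds_ib_ring_unalloc() does. */
static void demo_ring_unalloc(struct demo_ring *ring)
{
	ring->alloc_ctr--;
}

/* Stand-in for ib_post_recv(); it never fails here, but a real post can. */
static int demo_post_recv(unsigned int pos)
{
	(void)pos;
	return 0;
}

static unsigned int demo_refill(struct demo_ring *ring)
{
	unsigned int pos, posted = 0;
	int ret = 0;

	while (demo_ring_alloc(ring, &pos)) {
		ret = demo_post_recv(pos);
		if (ret)
			break;			/* leave the loop with this slot still reserved */
		posted++;
	}
	if (ret)
		demo_ring_unalloc(ring);	/* undo the reservation the failed post left behind */
	return posted;
}
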
574 /* ic starts out kzalloc()ed */
575 void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
577 struct ib_send_wr *wr = &ic->i_ack_wr;
578 struct ib_sge *sge = &ic->i_ack_sge;
580 sge->addr = ic->i_ack_dma;
582 sge->lkey = ic->i_pd->local_dma_lkey;
614 void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
618 spin_lock_irqsave(&ic->i_ack_lock, flags);
619 ic->i_ack_next = seq;
621 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
622 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
625 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
630 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
632 spin_lock_irqsave(&ic->i_ack_lock, flags);
633 seq = ic->i_ack_next;
634 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
639 void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
641 atomic64_set(&ic->i_ack_next, seq);
644 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
648 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
650 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
653 return atomic64_read(&ic->i_ack_next);
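
The listing contains two definitions each of rds_ib_set_ack() and rds_ib_get_ack(): one keeps i_ack_next as a plain u64 under i_ack_lock, the other as an atomic64_t; the full source selects between them with a preprocessor conditional depending on whether native 64-bit atomics are available. In both, the reader clears IB_ACK_REQUESTED before sampling the sequence, so a racing set_ack() that lands afterwards re-raises the flag and a newer ack is never lost. A hedged userspace sketch of both variants, with a pthread mutex and C11 atomics standing in for the spinlock, atomic64_t and bit operations (names are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Variant 1: the 64-bit sequence lives under a lock, mirroring the
 * i_ack_lock/i_ack_next pair in the first rds_ib_set_ack() above. */
struct ack_state_locked {
	pthread_mutex_t lock;
	uint64_t next;
	atomic_bool requested;		/* stands in for IB_ACK_REQUESTED */
};

static void ack_set_locked(struct ack_state_locked *s, uint64_t seq, bool required)
{
	pthread_mutex_lock(&s->lock);
	s->next = seq;
	if (required)
		atomic_store(&s->requested, true);
	pthread_mutex_unlock(&s->lock);
}

static uint64_t ack_get_locked(struct ack_state_locked *s)
{
	uint64_t seq;

	/* Clear the request flag before sampling the sequence: a racing
	 * set that lands afterwards re-raises the flag, so a newer ack is
	 * never silently dropped. */
	atomic_store(&s->requested, false);
	pthread_mutex_lock(&s->lock);
	seq = s->next;
	pthread_mutex_unlock(&s->lock);
	return seq;
}

/* Variant 2: the same contract with a native 64-bit atomic, mirroring the
 * atomic64_t used by the second rds_ib_set_ack()/rds_ib_get_ack() pair. */
struct ack_state_atomic {
	_Atomic uint64_t next;
	atomic_bool requested;
};

static void ack_set_atomic(struct ack_state_atomic *s, uint64_t seq, bool required)
{
	atomic_store(&s->next, seq);
	if (required)
		atomic_store(&s->requested, true);
}

static uint64_t ack_get_atomic(struct ack_state_atomic *s)
{
	atomic_store(&s->requested, false);
	return atomic_load(&s->next);
}
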
658 static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
660 struct rds_header *hdr = ic->i_ack;
664 seq = rds_ib_get_ack(ic);
666 rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
668 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
674 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
677 ic->i_ack_queued = jiffies;
679 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
684 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
685 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
689 rds_ib_conn_error(ic->conn, "sending ack failed\n");
732 void rds_ib_attempt_ack(struct rds_ib_connection *ic)
736 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
739 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
745 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
747 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
751 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
752 rds_ib_send_ack(ic, adv_credits);
759 void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
761 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
762 rds_ib_attempt_ack(ic);
769 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
771 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
773 return rds_ib_get_ack(ic);
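
The attempt/complete/piggyback fragments sketch the ack state machine: IB_ACK_REQUESTED records that an ack is owed, IB_ACK_IN_FLIGHT (taken with test_and_set_bit) ensures at most one ack work request is posted at a time, the send-completion handler clears IN_FLIGHT and immediately retries, and rds_ib_piggyb_ack() consumes the request when the ack can ride on an outgoing data message. A simplified userspace sketch of that flag protocol (credit handling and the post-failure path from rds_ib_send_ack() are omitted; demo_* names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Two single-bit states, mirroring IB_ACK_REQUESTED and IB_ACK_IN_FLIGHT. */
static atomic_bool ack_requested;
static atomic_bool ack_in_flight;

static void demo_post_ack(void)
{
	printf("ack posted\n");		/* stands in for posting the ack work request */
}

/* Mirrors the shape of rds_ib_attempt_ack(): do nothing unless an ack is
 * owed, and let at most one ack be in flight at a time. */
static void demo_attempt_ack(void)
{
	if (!atomic_load(&ack_requested))
		return;
	if (atomic_exchange(&ack_in_flight, true))
		return;			/* an ack is already outstanding; its completion retries */
	atomic_store(&ack_requested, false);
	demo_post_ack();
}

/* Mirrors rds_ib_ack_send_complete(): the completion frees the in-flight
 * slot and immediately re-checks whether another ack became pending. */
static void demo_ack_send_complete(void)
{
	atomic_store(&ack_in_flight, false);
	demo_attempt_ack();
}

/* Mirrors the flag side of rds_ib_piggyb_ack(): an outgoing data message
 * consumes the pending request so no separate ack needs to be posted. */
static bool demo_piggyback_ack(void)
{
	return atomic_exchange(&ack_requested, false);
}
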
853 struct rds_ib_connection *ic = conn->c_transport_data;
854 struct rds_ib_incoming *ibinc = ic->i_ibinc;
856 dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
860 rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
873 ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
875 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
911 rds_ib_frag_free(ic, recv->r_frag);
925 ic->i_ibinc = ibinc;
931 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
935 rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
936 ic->i_recv_data_rem, hdr->h_flags);
954 if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
955 ic->i_recv_data_rem -= RDS_FRAG_SIZE;
957 ic->i_recv_data_rem = 0;
958 ic->i_ibinc = NULL;
980 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
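
rds_ib_process_recv() reassembles a message across fragments: the first fragment's header sets ic->i_recv_data_rem from h_len and pins the incoming in ic->i_ibinc, each later completion subtracts RDS_FRAG_SIZE, and when the remainder reaches zero the incoming is delivered and i_ibinc is dropped. A small runnable sketch of just that accounting, assuming page-sized fragments (demo_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FRAG_SIZE 4096u	/* assumption: fragments carry at most one page of payload */

/* Per-connection reassembly state, mirroring ic->i_recv_data_rem and
 * ic->i_ibinc: the remaining byte count drops one fragment at a time and
 * the message is complete when it reaches zero. */
struct demo_reasm {
	uint32_t data_rem;	/* bytes still expected for the current message */
	int have_inc;		/* stands in for ic->i_ibinc != NULL */
};

static void demo_start_message(struct demo_reasm *r, uint32_t h_len)
{
	r->data_rem = h_len;	/* from the first fragment's header, cf. be32_to_cpu(hdr->h_len) */
	r->have_inc = 1;
}

/* Returns 1 when the fragment just processed completed the message. */
static int demo_process_fragment(struct demo_reasm *r)
{
	if (r->data_rem > DEMO_FRAG_SIZE) {
		r->data_rem -= DEMO_FRAG_SIZE;
		return 0;
	}
	r->data_rem = 0;	/* last fragment: deliver the incoming and drop the reference */
	r->have_inc = 0;
	return 1;
}

int main(void)
{
	struct demo_reasm r;
	int frags = 0;

	demo_start_message(&r, 10000);	/* a 10000-byte payload spans three page-sized fragments */
	do {
		frags++;
	} while (!demo_process_fragment(&r));
	printf("reassembled in %d fragments\n", frags);
	return 0;
}
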
984 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
988 struct rds_connection *conn = ic->conn;
997 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
998 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
1025 rds_ib_frag_free(ic, recv->r_frag);
1028 rds_ib_ring_free(&ic->i_recv_ring, 1);
1033 if (rds_ib_ring_empty(&ic->i_recv_ring))
1036 if (rds_ib_ring_low(&ic->i_recv_ring)) {
1045 struct rds_ib_connection *ic = conn->c_transport_data;
1049 rds_ib_attempt_ack(ic);