Lines Matching refs:rcd

88 	struct hfi1_ctxtdata *rcd;
94 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
105 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
111 if (rcd->ctxt == HFI1_CTRL_CTXT)
112 rcd->flags |= HFI1_CAP_DMA_RTAIL;
113 rcd->fast_handler = get_dma_rtail_setting(rcd) ?
117 hfi1_set_seq_cnt(rcd, 1);
119 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
120 if (!rcd->sc) {
124 hfi1_init_ctxt(rcd->sc);
137 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
139 if (!dd->rcd)
150 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
151 hfi1_free_ctxt(dd->rcd[i]);
154 kfree(dd->rcd);
155 dd->rcd = NULL;
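
The table setup at 137-155 is a NUMA-local array of context pointers, with a teardown that is safe to call even after a partial init. A minimal sketch of that allocate/free idiom, using hypothetical ctx_table/slot/nslots names in place of the hfi1 fields:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct ctx;			/* sketched further below */

	struct ctx_table {
		struct ctx **slot;	/* one entry per receive context */
		u16 nslots;
		spinlock_t lock;	/* stands in for dd->uctxt_lock */
	};

	static int ctx_table_alloc(struct ctx_table *t, int node)
	{
		/* kcalloc_node() zeroes the array, so empty slots read as NULL */
		t->slot = kcalloc_node(t->nslots, sizeof(*t->slot),
				       GFP_KERNEL, node);
		return t->slot ? 0 : -ENOMEM;
	}

	static void ctx_table_free(struct ctx_table *t)
	{
		kfree(t->slot);		/* kfree(NULL) is a no-op */
		t->slot = NULL;
	}

Allocating on dd->node keeps the hot per-context pointers on the same NUMA node as the device's interrupt handlers.
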
160 * Helper routines for the receive context reference count (rcd and uctxt).
162 static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
164 kref_init(&rcd->kref);
169 * @kref: pointer to an initialized rcd data structure
175 struct hfi1_ctxtdata *rcd =
178 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
179 rcd->dd->rcd[rcd->ctxt] = NULL;
180 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
182 hfi1_free_ctxtdata(rcd->dd, rcd);
184 kfree(rcd);
188 * hfi1_rcd_put - decrement reference for rcd
189 * @rcd: pointer to an initialized rcd data structure
193 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
195 if (rcd)
196 return kref_put(&rcd->kref, hfi1_rcd_free);
202 * hfi1_rcd_get - increment reference for rcd
203 * @rcd: pointer to an initialized rcd data structure
210 int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
212 return kref_get_unless_zero(&rcd->kref);
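
Lines 162-212 implement the canonical kref lifecycle: kref_init() when the context is published (hfi1_rcd_init, called from allocate_rcd_index), kref_get_unless_zero() on the lookup path so references are only taken on still-live objects, and a release callback that runs exactly once when the last reference drops. A sketch of the same pattern around a hypothetical struct ctx:

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct ctx {
		struct kref kref;
		u16 ctxt;		/* index in the table */
		/* ... payload ... */
	};

	/* Runs exactly once, when the last reference is dropped. */
	static void ctx_release(struct kref *kref)
	{
		struct ctx *c = container_of(kref, struct ctx, kref);

		kfree(c);
	}

	/* Mirrors hfi1_rcd_get(): fails (returns 0) on an object already
	 * headed for ctx_release(), so lookups never resurrect a dying
	 * context. */
	static int ctx_get(struct ctx *c)
	{
		return kref_get_unless_zero(&c->kref);
	}

	/* Mirrors hfi1_rcd_put(): NULL-tolerant decrement. */
	static void ctx_put(struct ctx *c)
	{
		if (c)
			kref_put(&c->kref, ctx_release);
	}

Note that the real hfi1_rcd_free additionally clears the dd->rcd[] slot under uctxt_lock before freeing (lines 178-184), so a concurrent hfi1_rcd_get_by_index can never hand out a pointer to a context that is being torn down.
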
216 * allocate_rcd_index - allocate an rcd index from the rcd array
218 * @rcd: rcd data structure to assign
221 * Find an empty index in the rcd array, and assign the given rcd to it.
226 struct hfi1_ctxtdata *rcd, u16 *index)
233 if (!dd->rcd[ctxt])
237 rcd->ctxt = ctxt;
238 dd->rcd[ctxt] = rcd;
239 hfi1_rcd_init(rcd);
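
allocate_rcd_index (226-239) publishes a new context into that table: scan for a NULL slot under the lock, store the pointer, and start the refcount while still holding the lock, so lookups only ever see fully published entries. Continuing the ctx_table sketch (names are assumptions, not the driver's):

	static int table_assign_index(struct ctx_table *t, struct ctx *c,
				      u16 *index)
	{
		unsigned long flags;
		u16 i;

		spin_lock_irqsave(&t->lock, flags);
		for (i = 0; i < t->nslots; i++)
			if (!t->slot[i])
				break;		/* first free slot */
		if (i == t->nslots) {
			spin_unlock_irqrestore(&t->lock, flags);
			return -EBUSY;
		}
		c->ctxt = i;
		t->slot[i] = c;			/* table's reference ... */
		kref_init(&c->kref);		/* ... starts the count at 1 */
		spin_unlock_irqrestore(&t->lock, flags);

		*index = i;
		return 0;
	}
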
255 	* @ctxt: the index of a possible rcd
275 	* @ctxt: the index of a possible rcd
277 * We need to protect access to the rcd array. If access is needed to
287 struct hfi1_ctxtdata *rcd = NULL;
290 if (dd->rcd[ctxt]) {
291 rcd = dd->rcd[ctxt];
292 if (!hfi1_rcd_get(rcd))
293 rcd = NULL;
297 return rcd;
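
hfi1_rcd_get_by_index (287-297) is the read side of that contract: the table lock keeps the slot pointer stable, and kref_get_unless_zero() refuses objects whose count has already hit zero, so the caller either gets a live context (plus a reference it owns) or NULL. Continuing the sketch:

	static struct ctx *table_get_by_index(struct ctx_table *t, u16 i)
	{
		struct ctx *c = NULL;
		unsigned long flags;

		spin_lock_irqsave(&t->lock, flags);
		if (t->slot[i]) {
			c = t->slot[i];
			if (!ctx_get(c))
				c = NULL;	/* context is mid-teardown */
		}
		spin_unlock_irqrestore(&t->lock, flags);

		return c;	/* caller must ctx_put() a non-NULL result */
	}
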
308 struct hfi1_ctxtdata *rcd;
316 rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
317 if (rcd) {
322 ret = allocate_rcd_index(dd, rcd, &ctxt);
325 kfree(rcd);
329 INIT_LIST_HEAD(&rcd->qp_wait_list);
330 hfi1_exp_tid_group_init(rcd);
331 rcd->ppd = ppd;
332 rcd->dd = dd;
333 rcd->numa_id = numa;
334 rcd->rcv_array_groups = dd->rcv_entries.ngroups;
335 rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
336 rcd->slow_handler = handle_receive_interrupt;
337 rcd->do_interrupt = rcd->slow_handler;
338 rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
340 mutex_init(&rcd->exp_mutex);
341 spin_lock_init(&rcd->exp_lock);
342 INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
343 INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
345 hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt);
357 rcd->rcv_array_groups++;
369 rcd->rcv_array_groups++;
375 rcd->eager_base = base * dd->rcv_entries.group_size;
377 rcd->rcvhdrq_cnt = rcvhdrcnt;
378 rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
379 rcd->rhf_offset =
380 rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
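
One subtlety at 379-380: division binds tighter than subtraction, so rhf_offset evaluates to rcvhdrqentsize - 2, i.e. the 64-bit receive header flags land in the last two 32-bit words of each header-queue entry. A trivial user-space check of the arithmetic, with an assumed entry size of 32 dwords:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t rcvhdrqentsize = 32;	/* assumed, in 32-bit words */
		/* 32 - (8 / 4) = 30, not (32 - 8) / 4 */
		uint32_t rhf_offset =
			rcvhdrqentsize - sizeof(uint64_t) / sizeof(uint32_t);

		printf("rhf_offset = %u\n", rhf_offset);	/* prints 30 */
		return 0;
	}
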
392 max_entries = rcd->rcv_array_groups *
395 rcd->egrbufs.count = round_down(rcvtids,
397 if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
399 rcd->ctxt);
400 rcd->egrbufs.count = MAX_EAGER_ENTRIES;
404 rcd->ctxt, rcd->egrbufs.count);
414 rcd->egrbufs.buffers =
415 kcalloc_node(rcd->egrbufs.count,
416 sizeof(*rcd->egrbufs.buffers),
418 if (!rcd->egrbufs.buffers)
420 rcd->egrbufs.rcvtids =
421 kcalloc_node(rcd->egrbufs.count,
422 sizeof(*rcd->egrbufs.rcvtids),
424 if (!rcd->egrbufs.rcvtids)
426 rcd->egrbufs.size = eager_buffer_size;
432 if (rcd->egrbufs.size < hfi1_max_mtu) {
433 rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
436 rcd->ctxt, rcd->egrbufs.size);
438 rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
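
The sizing logic at 392-438 rounds the eager-TID count down to a whole number of rcv-array groups, clamps it at MAX_EAGER_ENTRIES, and bumps the aggregate buffer size up to a power of two no smaller than the maximum MTU. A runnable sketch of that arithmetic, with assumed values (group size 8, 2048-entry cap; not the driver's defaults):

	#include <stdint.h>
	#include <stdio.h>

	#define GROUP_SIZE        8u	/* assumed dd->rcv_entries.group_size */
	#define MAX_EAGER_ENTRIES 2048u

	static uint32_t round_down_to(uint32_t v, uint32_t m) { return v - v % m; }

	static uint32_t roundup_pow_of_two(uint32_t v)
	{
		uint32_t p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		uint32_t rcvtids = 3000, max_mtu = 10240, size = 8192;
		uint32_t count = round_down_to(rcvtids, GROUP_SIZE); /* 3000 */

		if (count > MAX_EAGER_ENTRIES)
			count = MAX_EAGER_ENTRIES;	/* clamped to 2048 */
		if (size < max_mtu)
			size = roundup_pow_of_two(max_mtu); /* 10240 -> 16384 */
		printf("count=%u size=%u\n", count, size);
		return 0;
	}
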
442 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
444 if (!rcd->opstats)
448 hfi1_kern_init_ctxt_generations(rcd);
451 *context = rcd;
457 hfi1_free_ctxt(rcd);
463 * @rcd: pointer to an initialized rcd data structure
471 void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
473 hfi1_rcd_put(rcd);
680 struct hfi1_ctxtdata *rcd;
687 rcd = hfi1_rcd_get_by_index(dd, i);
690 HFI1_RCVCTRL_TAILUPD_DIS, rcd);
691 hfi1_rcd_put(rcd);
702 struct hfi1_ctxtdata *rcd;
714 rcd = hfi1_rcd_get_by_index(dd, i);
715 if (!rcd)
718 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
720 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
722 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
724 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
728 hfi1_rcvctrl(dd, rcvmask, rcd);
729 sc_enable(rcd->sc);
730 hfi1_rcd_put(rcd);
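
Both the disable loop at 680-691 and the enable loop at 702-730 follow the same iteration contract: look up each index, skip holes, do the work while holding a temporary reference, and always put it back. Sketched with the helpers above:

	static void for_each_live_ctx(struct ctx_table *t,
				      void (*fn)(struct ctx *c))
	{
		u16 i;

		for (i = 0; i < t->nslots; i++) {
			struct ctx *c = table_get_by_index(t, i);

			if (!c)
				continue;	/* empty or dying slot */
			fn(c);
			ctx_put(c);		/* drop the temporary ref */
		}
	}

Because the reference is taken under the table lock, the context cannot be freed out from under fn() even if it is removed from the table concurrently.
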
848 struct hfi1_ctxtdata *rcd;
878 /* dd->rcd can be NULL if early initialization failed */
879 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
886 rcd = hfi1_rcd_get_by_index(dd, i);
887 if (!rcd)
890 lastfail = hfi1_create_rcvhdrq(dd, rcd);
892 lastfail = hfi1_setup_eagerbufs(rcd);
894 lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
901 hfi1_rcd_put(rcd);
1007 struct hfi1_ctxtdata *rcd;
1031 rcd = hfi1_rcd_get_by_index(dd, i);
1036 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
1037 hfi1_rcd_put(rcd);
1080 * @rcd: the ctxtdata structure
1085 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1089 if (!rcd)
1092 if (rcd->rcvhdrq) {
1093 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
1094 rcd->rcvhdrq, rcd->rcvhdrq_dma);
1095 rcd->rcvhdrq = NULL;
1096 if (hfi1_rcvhdrtail_kvaddr(rcd)) {
1098 (void *)hfi1_rcvhdrtail_kvaddr(rcd),
1099 rcd->rcvhdrqtailaddr_dma);
1100 rcd->rcvhdrtail_kvaddr = NULL;
1105 kfree(rcd->egrbufs.rcvtids);
1106 rcd->egrbufs.rcvtids = NULL;
1108 for (e = 0; e < rcd->egrbufs.alloced; e++) {
1109 if (rcd->egrbufs.buffers[e].addr)
1111 rcd->egrbufs.buffers[e].len,
1112 rcd->egrbufs.buffers[e].addr,
1113 rcd->egrbufs.buffers[e].dma);
1115 kfree(rcd->egrbufs.buffers);
1116 rcd->egrbufs.alloced = 0;
1117 rcd->egrbufs.buffers = NULL;
1119 sc_free(rcd->sc);
1120 rcd->sc = NULL;
1122 vfree(rcd->subctxt_uregbase);
1123 vfree(rcd->subctxt_rcvegrbuf);
1124 vfree(rcd->subctxt_rcvhdr_base);
1125 kfree(rcd->opstats);
1127 rcd->subctxt_uregbase = NULL;
1128 rcd->subctxt_rcvegrbuf = NULL;
1129 rcd->subctxt_rcvhdr_base = NULL;
1130 rcd->opstats = NULL;
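
hfi1_free_ctxtdata (1085-1130) applies the defensive free-then-NULL idiom throughout: every resource pointer is cleared after release, so the function is idempotent and safe on partially constructed contexts (kfree/vfree of NULL are no-ops, and the DMA branches are skipped when nothing was mapped). A condensed sketch of the idiom with hypothetical fields:

	static void ctx_teardown(struct device *dev, struct ctx_bufs *b,
				 size_t amt)
	{
		if (b->ring) {
			dma_free_coherent(dev, amt, b->ring, b->ring_dma);
			b->ring = NULL;		/* a second call is harmless */
		}
		kfree(b->stats);		/* kfree(NULL) is a no-op */
		b->stats = NULL;
	}
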
1512 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
1513 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1515 if (rcd) {
1516 hfi1_free_ctxt_rcv_groups(rcd);
1517 hfi1_free_ctxt(rcd);
1521 kfree(dd->rcd);
1522 dd->rcd = NULL;
1752 * @rcd: the context data
1758 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1762 if (!rcd->rcvhdrq) {
1763 amt = rcvhdrq_size(rcd);
1765 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1766 &rcd->rcvhdrq_dma,
1769 if (!rcd->rcvhdrq) {
1772 amt, rcd->ctxt);
1776 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1777 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1778 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1780 &rcd->rcvhdrqtailaddr_dma,
1782 if (!rcd->rcvhdrtail_kvaddr)
1787 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
1788 rcd->rcvhdrq_cnt);
1795 rcd->ctxt);
1796 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1797 rcd->rcvhdrq_dma);
1798 rcd->rcvhdrq = NULL;
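
hfi1_create_rcvhdrq (1758-1798) allocates the header queue from the coherent DMA pool and, when DMA'd tail updates are enabled, a separate page for the hardware-written tail pointer; on failure the partially allocated ring is unwound so later teardown sees NULL. A sketch of that allocate-with-unwind shape (the hdrq field names are assumptions):

	#include <linux/dma-mapping.h>

	struct hdrq {
		void *base, *tail;
		dma_addr_t dma, tail_dma;
	};

	static int hdrq_alloc(struct device *dev, struct hdrq *q, size_t amt,
			      bool dma_rtail)
	{
		q->base = dma_alloc_coherent(dev, amt, &q->dma, GFP_KERNEL);
		if (!q->base)
			return -ENOMEM;

		if (dma_rtail) {
			q->tail = dma_alloc_coherent(dev, PAGE_SIZE,
						     &q->tail_dma, GFP_KERNEL);
			if (!q->tail) {
				/* unwind the ring so teardown sees NULL */
				dma_free_coherent(dev, amt, q->base, q->dma);
				q->base = NULL;
				return -ENOMEM;
			}
		}
		return 0;
	}
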
1806 * @rcd: the context we are setting up.
1813 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1815 struct hfi1_devdata *dd = rcd->dd;
1828 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1829 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1834 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1835 rcd->egrbufs.rcvtid_size = round_mtu;
1841 if (rcd->egrbufs.size <= (1 << 20))
1842 rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1843 rounddown_pow_of_two(rcd->egrbufs.size / 8));
1845 while (alloced_bytes < rcd->egrbufs.size &&
1846 rcd->egrbufs.alloced < rcd->egrbufs.count) {
1847 rcd->egrbufs.buffers[idx].addr =
1849 rcd->egrbufs.rcvtid_size,
1850 &rcd->egrbufs.buffers[idx].dma,
1852 if (rcd->egrbufs.buffers[idx].addr) {
1853 rcd->egrbufs.buffers[idx].len =
1854 rcd->egrbufs.rcvtid_size;
1855 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1856 rcd->egrbufs.buffers[idx].addr;
1857 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1858 rcd->egrbufs.buffers[idx].dma;
1859 rcd->egrbufs.alloced++;
1860 alloced_bytes += rcd->egrbufs.rcvtid_size;
1872 if (rcd->egrbufs.rcvtid_size == round_mtu ||
1873 !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1875 rcd->ctxt);
1880 new_size = rcd->egrbufs.rcvtid_size / 2;
1888 rcd->egrbufs.rcvtid_size = new_size;
1896 rcd->egrbufs.alloced = 0;
1898 if (i >= rcd->egrbufs.count)
1900 rcd->egrbufs.rcvtids[i].dma =
1901 rcd->egrbufs.buffers[j].dma + offset;
1902 rcd->egrbufs.rcvtids[i].addr =
1903 rcd->egrbufs.buffers[j].addr + offset;
1904 rcd->egrbufs.alloced++;
1905 if ((rcd->egrbufs.buffers[j].dma + offset +
1907 (rcd->egrbufs.buffers[j].dma +
1908 rcd->egrbufs.buffers[j].len)) {
1915 rcd->egrbufs.rcvtid_size = new_size;
1918 rcd->egrbufs.numbufs = idx;
1919 rcd->egrbufs.size = alloced_bytes;
1923 rcd->ctxt, rcd->egrbufs.alloced,
1924 rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
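
The retry logic at 1845-1915 deserves a note: when a coherent allocation fails, the code gives up only if the buffer is already MTU-sized or multi-packet eager receive is off; otherwise it halves rcvtid_size and re-slices the buffers already allocated at the old size into windows of the new size, rebuilding the rcvtids[] array. The re-slicing arithmetic, as a runnable sketch with assumed sizes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* two 64 KiB buffers re-sliced into 32 KiB rcvtid windows */
		uint64_t buf_dma[2] = { 0x10000, 0x30000 };
		uint32_t buf_len = 65536, new_size = 32768, alloced = 0;
		int j;

		for (j = 0; j < 2; j++) {
			uint32_t off;

			for (off = 0; off + new_size <= buf_len;
			     off += new_size)
				printf("rcvtid %u -> dma %#llx\n", alloced++,
				       (unsigned long long)(buf_dma[j] + off));
		}
		return 0;	/* yields 4 rcvtids from 2 buffers */
	}
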
1931 rcd->egrbufs.threshold =
1932 rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
1938 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1939 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1940 rcd->expected_count = max_entries - egrtop;
1941 if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
1942 rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
1944 rcd->expected_base = rcd->eager_base + egrtop;
1946 rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
1947 rcd->eager_base, rcd->expected_base);
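
The expected-TID layout at 1938-1947 is straightforward arithmetic: the eager region is the allocated buffer count rounded up to a whole rcv-array group, and whatever entries remain above it (capped at MAX_TID_PAIR_ENTRIES * 2) become the expected-receive region starting at expected_base. Worked with assumed numbers:

	#include <stdint.h>
	#include <stdio.h>

	#define GROUP_SIZE           8u		/* assumed group size */
	#define MAX_TID_PAIR_ENTRIES 512u

	static uint32_t roundup_to(uint32_t v, uint32_t m)
	{
		return (v + m - 1) / m * m;
	}

	int main(void)
	{
		uint32_t groups = 128, alloced = 900, eager_base = 0;
		uint32_t max_entries = groups * GROUP_SIZE;	   /* 1024 */
		uint32_t egrtop = roundup_to(alloced, GROUP_SIZE); /* 904 */
		uint32_t expected_count = max_entries - egrtop;	   /* 120 */

		if (expected_count > MAX_TID_PAIR_ENTRIES * 2)
			expected_count = MAX_TID_PAIR_ENTRIES * 2;
		printf("expected_base=%u expected_count=%u\n",
		       eager_base + egrtop, expected_count);
		return 0;
	}
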
1949 if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
1952 rcd->ctxt, rcd->egrbufs.rcvtid_size);
1957 for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
1958 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
1959 rcd->egrbufs.rcvtids[idx].dma, order);
1966 for (idx = 0; idx < rcd->egrbufs.alloced &&
1967 rcd->egrbufs.buffers[idx].addr;
1970 rcd->egrbufs.buffers[idx].len,
1971 rcd->egrbufs.buffers[idx].addr,
1972 rcd->egrbufs.buffers[idx].dma);
1973 rcd->egrbufs.buffers[idx].addr = NULL;
1974 rcd->egrbufs.buffers[idx].dma = 0;
1975 rcd->egrbufs.buffers[idx].len = 0;