Lines Matching refs:kring

63 /* Write kring pointers (hwcur, hwtail) to the CSB.
93 /* Read kring pointers (head, cur, sync_flags) from the CSB.
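
These two helpers are the kernel half of the CSB (Communication Status Block) protocol: the kloop publishes hwcur/hwtail for the application and consumes the head/cur/sync_flags the application publishes. The order of the two stores is what makes the unlocked exchange safe. Below is a minimal user-space model of that ordering, using C11 atomics as stand-ins for netmap's nm_stst_barrier()/nm_ldld_barrier() macros; the struct is a simplified assumption, not the real struct nm_csb_ktoa.

    #include <stdatomic.h>
    #include <stdint.h>

    struct csb_ktoa_model {
        _Atomic uint32_t hwcur;
        _Atomic uint32_t hwtail;
    };

    /* Kernel side: store hwcur before hwtail, so a reader that observes
     * the new hwtail is guaranteed to also observe the matching hwcur. */
    static void
    kernel_write_model(struct csb_ktoa_model *csb, uint32_t hwcur,
                       uint32_t hwtail)
    {
        atomic_store_explicit(&csb->hwcur, hwcur, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* store-store barrier */
        atomic_store_explicit(&csb->hwtail, hwtail, memory_order_relaxed);
    }

    /* Application side: load in the opposite order, hwtail first. */
    static void
    appl_read_model(struct csb_ktoa_model *csb, uint32_t *hwtail,
                    uint32_t *hwcur)
    {
        *hwtail = atomic_load_explicit(&csb->hwtail, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire); /* load-load barrier */
        *hwcur = atomic_load_explicit(&csb->hwcur, memory_order_relaxed);
    }
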
136 sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
138 nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
140 title, kring->name, kring->nr_hwcur, kring->rhead,
141 kring->rcur, kring->rtail, kring->nr_hwtail);
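
Assembled, the helper matched at lines 136-141 is a one-shot nm_prinf of all five kring pointers. The continuation line of the format string is not matched above (it contains no reference to kring) and is an inference from the argument list:

    static void
    sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
    {
        nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
            "rcur %d, rtail %d, hwtail %d",
            title, kring->name, kring->nr_hwcur, kring->rhead,
            kring->rcur, kring->rtail, kring->nr_hwtail);
    }
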
148 struct netmap_kring *kring;
163 struct netmap_kring *kring = a->kring;
171 if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
175 num_slots = kring->nkr_num_slots;
181 /* Copy the application kring pointers from the CSB */
185 batch = shadow_ring.head - kring->nr_hwcur;
193 uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);
204 if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
209 shadow_ring.tail = kring->rtail;
210 if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
212 netmap_ring_reinit(kring);
220 sync_kloop_kring_dump("pre txsync", kring);
223 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
237 sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
238 kring->nr_hwtail);
239 if (kring->rtail != kring->nr_hwtail) {
241 kring->rtail = kring->nr_hwtail;
246 sync_kloop_kring_dump("post txsync", kring);
261 if (shadow_ring.head == kring->rhead) {
275 if (shadow_ring.head != kring->rhead) {
284 if (nm_kr_txempty(kring)) {
292 nm_kr_put(kring);
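
The TX service function above (lines 148-292) validates the application's pointers with nm_txsync_prologue(), runs the backend nm_sync(), and publishes the results back to the CSB; the separate check at line 204 triggers once free TX space drops to half the ring. The matches at lines 185-193 are its batch limiter: it measures how far the application's head ran ahead of nr_hwcur and, when the advance exceeds PTN_TX_BATCH_LIM, cuts shadow_ring.head back so a single txsync never processes more than the limit, leaving the excess for the next loop iteration. A standalone sketch of that arithmetic, assuming the limit is half the ring (the listing does not show PTN_TX_BATCH_LIM's definition):

    #include <stdint.h>

    /* Assumption: PTN_TX_BATCH_LIM(n) is half the ring. */
    #define TX_BATCH_LIM(n) ((n) >> 1)

    static uint32_t
    clamp_tx_batch(uint32_t *head, uint32_t hwcur, uint32_t num_slots)
    {
        int32_t batch = (int32_t)(*head - hwcur);

        if (batch < 0)
            batch += num_slots; /* head wrapped past the end of the ring */

        if ((uint32_t)batch > TX_BATCH_LIM(num_slots)) {
            /* Cut the move; the remaining slots are processed on the
             * next iteration of the service loop. */
            uint32_t head_lim = hwcur + TX_BATCH_LIM(num_slots);

            if (head_lim >= num_slots)
                head_lim -= num_slots;
            *head = head_lim;
            batch = (int32_t)TX_BATCH_LIM(num_slots);
        }
        return (uint32_t)batch;
    }
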
305 sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
307 return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
308 kring->nkr_num_slots - 1));
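
sync_kloop_norxslots() encodes the RX-full condition: the kernel may fill receive slots only up to one position behind the application's head, so there is nothing left to do when hwtail == head - 1 modulo the ring size. An equivalent standalone form, with prev_slot() mirroring netmap's nm_prev():

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint32_t
    prev_slot(uint32_t i, uint32_t lim) /* lim == num_slots - 1 */
    {
        return i > 0 ? i - 1 : lim;
    }

    static inline bool
    no_rx_slots(uint32_t hwtail, uint32_t head, uint32_t num_slots)
    {
        return hwtail == prev_slot(head, num_slots - 1);
    }
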
315 struct netmap_kring *kring = a->kring;
323 if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
327 num_slots = kring->nkr_num_slots;
330 num_slots = kring->nkr_num_slots;
336 /* Copy the application kring pointers from the CSB */
343 shadow_ring.tail = kring->rtail;
344 if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
346 netmap_ring_reinit(kring);
354 sync_kloop_kring_dump("pre rxsync", kring);
357 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
370 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
371 sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
372 if (kring->rtail != hwtail) {
373 kring->rtail = hwtail;
381 sync_kloop_kring_dump("post rxsync", kring);
396 if (sync_kloop_norxslots(kring, shadow_ring.head)) {
410 if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
419 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
420 if (unlikely(hwtail == kring->rhead ||
425 hwtail, kring->rhead, dry_cycles);
430 nm_kr_put(kring);
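
The tail of the RX service function (lines 396-430) decides when to stop spinning: it re-checks the free-slot condition (lines 396, 410) and counts passes in which nr_hwtail did not advance, giving up after a small budget of dry cycles (lines 419-425) to wait for a backend notification instead. A runnable model of that bookkeeping; the budget of 2 and the helper names are assumptions, since the listing shows only the dry_cycles variable:

    #include <stdint.h>

    #define RX_DRY_CYCLES_MAX 2 /* assumption: small retry budget */

    struct rx_model {
        uint32_t hwtail; /* where the kernel stopped receiving */
    };

    /* Stub standing in for kring->nm_sync(): it never receives anything,
     * so the service loop below terminates after RX_DRY_CYCLES_MAX passes. */
    static uint32_t
    model_rxsync(struct rx_model *r)
    {
        return r->hwtail;
    }

    static int
    rx_service_model(struct rx_model *r)
    {
        int dry_cycles = 0;

        for (;;) {
            uint32_t hwtail = model_rxsync(r);

            if (hwtail == r->hwtail) {
                /* nr_hwtail did not move: a dry run. */
                if (++dry_cycles >= RX_DRY_CYCLES_MAX)
                    break; /* stop and wait for a notification */
            } else {
                r->hwtail = hwtail;
                dry_cycles = 0;
            }
        }
        return dry_cycles;
    }
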
635 a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]];
644 a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]];
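
Lines 635 and 644 bind one argument block per ring: for each direction, the kloop services only the krings this file descriptor registered, starting at np_qfirst. A simplified model of the indexing; the structs are placeholders, not the netmap types:

    #include <stdint.h>

    struct kring_model { int id; };

    struct ring_args_model {
        struct kring_model *kring;
    };

    static void
    bind_rings(struct ring_args_model *args, struct kring_model **krings,
               uint32_t qfirst, uint32_t num_rings)
    {
        /* Mirrors a->kring = NMR(na, t)[i + priv->np_qfirst[t]]. */
        for (uint32_t i = 0; i < num_rings; i++)
            args[i].kring = krings[i + qfirst];
    }
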
974 struct netmap_kring *kring, int flags)
985 kring->nr_hwcur = ktoa->hwcur;
986 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
989 if (((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
999 if (nm_kr_wouldblock(kring) || (flags & NAF_FORCE_RECLAIM)) {
1000 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
1001 &kring->nr_hwcur);
1009 if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
1014 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
1015 &kring->nr_hwcur);
1017 if (unlikely(!nm_kr_wouldblock(kring))) {
1023 kring->name, atok->head, atok->cur, ktoa->hwtail,
1024 kring->rhead, kring->rcur, kring->nr_hwtail);
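
Lines 1009-1017 are the guest-side sleep prologue for TX: before blocking, the application re-enables kernel notifications (appl_need_kick in the real CSB), re-reads the kernel pointers, and withdraws the request if free space appeared in the window (line 1017), closing the classic lost-wakeup race. A user-space model of that double-check; field names are simplified stand-ins, the seq_cst fence stands in for a store-load barrier, and would_block() assumes nm_kr_wouldblock() is the cur == hwtail emptiness test:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct guest_tx_model {
        _Atomic uint32_t appl_need_kick; /* app -> kernel: notify me */
        _Atomic uint32_t hwtail;         /* kernel -> app */
        uint32_t rcur;                   /* app private */
    };

    static bool
    would_block(uint32_t rcur, uint32_t hwtail)
    {
        return rcur == hwtail; /* no slots available to the application */
    }

    static void
    tx_sleep_prologue(struct guest_tx_model *r)
    {
        atomic_store(&r->appl_need_kick, 1);
        /* Store-load barrier: the kick request must be globally visible
         * before we re-sample hwtail, or a concurrent kernel update could
         * be missed and the application would sleep forever. */
        atomic_thread_fence(memory_order_seq_cst);
        if (!would_block(r->rcur, atomic_load(&r->hwtail))) {
            /* Space appeared in the double-check window: cancel. */
            atomic_store(&r->appl_need_kick, 0);
        }
    }
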
1042 struct netmap_kring *kring, int flags)
1050 * First part: import newly received packets, by updating the kring
1052 * This also updates the kring hwcur.
1054 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
1055 kring->nr_kflags &= ~NKR_PENDINTR;
1061 if (kring->rhead != kring->nr_hwcur) {
1062 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
1070 if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
1075 nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
1076 &kring->nr_hwcur);
1078 if (!nm_kr_wouldblock(kring)) {
1084 if ((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
1091 kring->name, atok->head, atok->cur, ktoa->hwtail,
1092 kring->rhead, kring->rcur, kring->nr_hwtail);
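
The RX counterpart has the same two-part shape: first import newly received slots by reading hwcur/hwtail from the CSB and clearing NKR_PENDINTR (lines 1054-1055), then publish cur/head to release consumed buffers (lines 1061-1062), with the same enable-and-double-check dance before blocking (lines 1070-1078). The condition at line 1084 coalesces kicks: notify the host only when buffers were actually released or the ring would block. A sketch of that decision; field names are simplified, and gating it on a kernel kick-request flag (kern_need_kick in the real CSB) is an assumption about the unmatched half of the condition:

    #include <stdbool.h>
    #include <stdint.h>

    struct rx_kick_model {
        uint32_t rhead, nr_hwcur; /* did the app release buffers? */
        uint32_t rcur, nr_hwtail; /* would the app block? */
        bool kern_need_kick;      /* assumption: kernel asked for kicks */
    };

    static bool
    rx_needs_kick(const struct rx_kick_model *r)
    {
        bool wouldblock = (r->rcur == r->nr_hwtail);

        return (r->rhead != r->nr_hwcur || wouldblock) && r->kern_need_kick;
    }
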