Lines matching refs:ab (references to the struct aiotx_buffer pointer ab)

92 struct aiotx_buffer *ab;
95 ab = m->m_ext.ext_arg1;
96 return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE);
102 struct aiotx_buffer *ab;
106 ab = m->m_ext.ext_arg1;
107 npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE;
108 return (ab->ps.pages + npages);
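The two accessors at lines 92-108 turn an mbuf's external-storage arguments back into a page pointer and an intra-page offset: ps.offset plus the per-mbuf offset carried in ext_arg2 is split by PAGE_SIZE. The following user-space sketch is not the driver's code; PAGE_SIZE's value, the struct, and all sketch_* names are illustrative assumptions, but the arithmetic mirrors lines 96 and 107.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumption: 4 KB pages, as on amd64 */

/* Illustrative stand-in for the page-set part of struct aiotx_buffer. */
struct pageset_sketch {
	int	npages;		/* pages held for the AIO buffer */
	size_t	offset;		/* offset of the buffer within the first page */
	size_t	len;		/* total buffer length in bytes */
};

/*
 * The sum of the page-set offset and the per-mbuf offset is reduced
 * modulo PAGE_SIZE for the intra-page offset ...
 */
static size_t
sketch_pgoff(const struct pageset_sketch *ps, uintptr_t mbuf_off)
{
	return ((ps->offset + mbuf_off) % PAGE_SIZE);
}

/* ... and divided by PAGE_SIZE for the index into the held page array. */
static int
sketch_pgidx(const struct pageset_sketch *ps, uintptr_t mbuf_off)
{
	return ((ps->offset + mbuf_off) / PAGE_SIZE);
}

int
main(void)
{
	struct pageset_sketch ps = { .npages = 3, .offset = 100, .len = 9000 };

	/* An mbuf that starts 8000 bytes into the buffer lands in page 1. */
	printf("page %d, offset %zu\n",
	    sketch_pgidx(&ps, 8000), sketch_pgoff(&ps, 8000));
	assert(sketch_pgidx(&ps, 8000) == (100 + 8000) / PAGE_SIZE);
	return (0);
}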
1924 free_aiotx_buffer(struct aiotx_buffer *ab)
1930 if (refcount_release(&ab->refcount) == 0)
1933 job = ab->job;
1936 vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
1937 free(ab, M_CXGBE);
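free_aiotx_buffer() at line 1924 tears the buffer down only on the final reference drop: refcount_release(9) returns non-zero exactly when the count reaches zero, so the early return at line 1930 skips the cleanup while other references remain. A minimal user-space analogue of that pattern, using C11 atomics in place of refcount(9) and plain free() in place of vm_page_unhold_pages(), might look like this (all sketch_* names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Illustrative analogue; the real code uses FreeBSD's refcount(9). */
struct refbuf_sketch {
	atomic_uint	refcount;
	void		*pages;		/* stand-in for ab->ps.pages */
};

/* Like refcount_release(9): returns true only for the final release. */
static bool
sketch_refcount_release(atomic_uint *count)
{
	return (atomic_fetch_sub(count, 1) == 1);
}

/*
 * Mirrors free_aiotx_buffer(): drop one reference and, only if that
 * was the last one, release the pages and free the buffer itself.
 */
static void
sketch_free_refbuf(struct refbuf_sketch *rb)
{
	if (!sketch_refcount_release(&rb->refcount))
		return;			/* other references still exist */
	free(rb->pages);		/* stands in for vm_page_unhold_pages() */
	free(rb);
}

int
main(void)
{
	struct refbuf_sketch *rb = calloc(1, sizeof(*rb));

	if (rb == NULL)
		return (1);
	atomic_init(&rb->refcount, 2);	/* e.g. the job plus one in-flight mbuf */
	rb->pages = malloc(16);
	sketch_free_refbuf(rb);		/* first drop: buffer survives */
	sketch_free_refbuf(rb);		/* last drop: pages and rb freed */
	return (0);
}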
1955 struct aiotx_buffer *ab = buffer;
1959 m->m_len, jobtotid(ab->job));
1961 free_aiotx_buffer(ab);
1971 struct aiotx_buffer *ab;
1992 ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
1994 refcount_init(&ab->refcount, 1);
1995 ab->ps.pages = (vm_page_t *)(ab + 1);
1996 ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
1997 VM_PROT_WRITE, ab->ps.pages, n);
1998 if (ab->ps.npages < 0) {
1999 free(ab, M_CXGBE);
2003 KASSERT(ab->ps.npages == n,
2004 ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));
2006 ab->ps.offset = pgoff;
2007 ab->ps.len = job->uaiocb.aio_nbytes;
2008 ab->job = job;
2009 job->backend1 = ab;
2012 __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
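The allocation at lines 1992-2012 sizes a single malloc() to cover both the aiotx_buffer and its trailing vm_page_t array, points ps.pages just past the struct with (ab + 1), and then holds the user pages with vm_fault_quick_hold_pages(), bailing out if the hold fails. The single-allocation layout is sketched below in user-space C; vm_page_sketch_t and the sketch_* names are stand-ins, not driver types.

#include <stdio.h>
#include <stdlib.h>

typedef void *vm_page_sketch_t;	/* stand-in for vm_page_t */

struct aiotx_buffer_sketch {
	unsigned		refcount;
	vm_page_sketch_t	*pages;
	int			npages;
	size_t			offset;
	size_t			len;
};

/*
 * One allocation covers the struct plus its trailing page-pointer
 * array, and pages is simply pointed just past the struct (ab + 1),
 * so a single free() releases everything.
 */
static struct aiotx_buffer_sketch *
sketch_alloc(int n)
{
	struct aiotx_buffer_sketch *ab;

	ab = calloc(1, sizeof(*ab) + n * sizeof(vm_page_sketch_t));
	if (ab == NULL)
		return (NULL);
	ab->refcount = 1;
	ab->pages = (vm_page_sketch_t *)(ab + 1);
	ab->npages = n;
	return (ab);
}

int
main(void)
{
	struct aiotx_buffer_sketch *ab = sketch_alloc(4);

	if (ab == NULL)
		return (1);
	/* The array lives immediately after the struct. */
	printf("struct at %p, pages[] at %p\n", (void *)ab, (void *)ab->pages);
	free(ab);
	return (0);
}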
2023 struct aiotx_buffer *ab;
2034 ab = job->backend1;
2043 if (ab == NULL) {
2047 ab = job->backend1;
2109 if (m->m_len > ab->ps.len - job->aio_sent) {
2110 m->m_len = ab->ps.len - job->aio_sent;
2136 refcount_acquire(&ab->refcount);
2137 m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
2185 free_aiotx_buffer(ab);
2189 if (ab != NULL) {
2191 free_aiotx_buffer(ab);
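The transmit path around lines 2109-2137 trims each mbuf to the bytes still unsent (ab->ps.len - job->aio_sent) and takes one extra buffer reference per mbuf before m_extadd() attaches the held pages as external storage, so the buffer outlives every in-flight mbuf. A rough user-space sketch of that per-chunk pattern follows; the chunk size, counters, and sketch_* names are made up for illustration.

#include <stdio.h>

/*
 * For each chunk carved out of the AIO buffer: clamp the chunk to the
 * bytes that remain (the last chunk is usually short) and take one
 * additional reference that the free callback will later release.
 */
static void
sketch_send_loop(size_t total_len, size_t chunk, unsigned *refcount)
{
	size_t sent = 0;

	while (sent < total_len) {
		size_t m_len = chunk;

		if (m_len > total_len - sent)
			m_len = total_len - sent;	/* trim final chunk */
		(*refcount)++;		/* one reference per attached mbuf */
		printf("chunk of %zu bytes, refcount now %u\n", m_len,
		    *refcount);
		sent += m_len;
	}
}

int
main(void)
{
	unsigned refcount = 1;		/* initial job reference */

	sketch_send_loop(9000, 4096, &refcount);
	return (0);
}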
2249 struct aiotx_buffer *ab;
2266 ab = job->backend1;
2267 if (ab != NULL)
2268 free_aiotx_buffer(ab);