Lines Matching defs:rdev

42 static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
46 if (c4iw_id_table_alloc(&rdev->resource.qid_table,
47 rdev->adap->vres.qp.start,
48 rdev->adap->vres.qp.size,
49 rdev->adap->vres.qp.size, 0)) {
54 for (i = rdev->adap->vres.qp.start;
55 i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
56 if (!(i & rdev->qpmask))
57 c4iw_id_free(&rdev->resource.qid_table, i);
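The init routine above allocates an ID table spanning the adapter's whole vres.qp range and then frees back only the qids that clear qpmask (presumably so each allocation hands out the base qid of a whole doorbell/GTS group). A minimal userspace sketch of that seeding filter, with made-up range and mask values rather than the adapter's real ones:

/*
 * Sketch only, not driver code: with qpmask = qids_per_group - 1, only
 * group-aligned qids are returned to the table, so a later allocation
 * always yields a group-aligned base qid.
 */
#include <stdio.h>

int main(void)
{
    unsigned int start = 1024, size = 32;   /* hypothetical vres.qp range */
    unsigned int qpmask = 3;                /* 4 qids per group, assumed */

    for (unsigned int i = start; i < start + size; i++)
        if (!(i & qpmask))
            printf("free qid %u back to the qid_table\n", i);
    return 0;
}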
62 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
65 err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
69 err = c4iw_init_qid_table(rdev);
72 err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
78 c4iw_id_table_free(&rdev->resource.qid_table);
80 c4iw_id_table_free(&rdev->resource.tpt_table);
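c4iw_init_resource sets up three ID tables in sequence and, on failure, unwinds only what was already created (the qid table, then the tpt table). A sketch of that unwind pattern; alloc_table/free_table are hypothetical stand-ins, not the real c4iw_id_table_* calls:

#include <stdlib.h>

static int alloc_table(void **t) { *t = malloc(16); return *t ? 0 : -1; }
static void free_table(void *t) { free(t); }

int init_resource(void **tpt, void **qid, void **pdid)
{
    if (alloc_table(tpt))
        goto tpt_err;
    if (alloc_table(qid))
        goto qid_err;
    if (alloc_table(pdid))
        goto pdid_err;
    return 0;
pdid_err:
    free_table(*qid);       /* mirrors freeing the qid table on pdid failure */
qid_err:
    free_table(*tpt);       /* mirrors freeing the tpt table on qid failure */
tpt_err:
    return -1;
}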
104 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
118 qid = c4iw_get_resource(&rdev->resource.qid_table);
121 mutex_lock(&rdev->stats.lock);
122 rdev->stats.qid.cur += rdev->qpmask + 1;
123 mutex_unlock(&rdev->stats.lock);
124 for (i = qid+1; i & rdev->qpmask; i++) {
141 for (i = qid+1; i & rdev->qpmask; i++) {
152 mutex_lock(&rdev->stats.lock);
153 if (rdev->stats.qid.cur > rdev->stats.qid.max)
154 rdev->stats.qid.max = rdev->stats.qid.cur;
155 mutex_unlock(&rdev->stats.lock);
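Both qid allocators above bump a running count by qpmask + 1 under rdev->stats.lock and track a high-water mark. A self-contained sketch of that accounting, using a local struct and a pthread mutex in place of the driver's stats block:

#include <pthread.h>

struct qid_stats {
    pthread_mutex_t lock;
    unsigned long cur, max;
};

static struct qid_stats stats = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Called per successful get: qpmask + 1 qids are consumed at once. */
static void account_alloc(unsigned long n)
{
    pthread_mutex_lock(&stats.lock);
    stats.cur += n;
    if (stats.cur > stats.max)      /* track the high-water mark */
        stats.max = stats.cur;
    pthread_mutex_unlock(&stats.lock);
}

/* The matching put path would simply do stats.cur -= n under the lock. */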
159 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
174 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
188 qid = c4iw_get_resource(&rdev->resource.qid_table);
191 mutex_lock(&rdev->stats.lock);
192 rdev->stats.qid.cur += rdev->qpmask + 1;
193 mutex_unlock(&rdev->stats.lock);
194 for (i = qid+1; i & rdev->qpmask; i++) {
211 for (i = qid; i & rdev->qpmask; i++) {
222 mutex_lock(&rdev->stats.lock);
223 if (rdev->stats.qid.cur > rdev->stats.qid.max)
224 rdev->stats.qid.max = rdev->stats.qid.cur;
225 mutex_unlock(&rdev->stats.lock);
229 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
255 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
259 vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
263 mutex_lock(&rdev->stats.lock);
265 rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
266 if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
267 rdev->stats.pbl.max = rdev->stats.pbl.cur;
269 rdev->stats.pbl.fail++;
270 mutex_unlock(&rdev->stats.lock);
274 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
277 mutex_lock(&rdev->stats.lock);
278 rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
279 mutex_unlock(&rdev->stats.lock);
280 vmem_xfree(rdev->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
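The PBL pool charges allocations in units of its minimum granularity: both c4iw_pblpool_alloc and c4iw_pblpool_free round the requested size up to 1 << MIN_PBL_SHIFT before adjusting stats.pbl.cur, so the counter balances across an alloc/free pair. A quick demo of that rounding; the MIN_PBL_SHIFT value here is an assumption, not taken from the listing:

#include <stdio.h>

#define MIN_PBL_SHIFT 12    /* assumed for the demo only */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
    int size = 1000;

    printf("request %d -> charged %d bytes\n",
        size, roundup(size, 1 << MIN_PBL_SHIFT));
    return 0;
}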
283 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
285 rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
286 rdev->adap->vres.pbl.start,
287 rdev->adap->vres.pbl.size,
289 if (!rdev->pbl_arena)
295 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
297 vmem_destroy(rdev->pbl_arena);
304 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
308 vmem_xalloc(rdev->rqt_arena,
316 device_get_nameunit(rdev->adap->dev));
317 mutex_lock(&rdev->stats.lock);
319 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
320 if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
321 rdev->stats.rqt.max = rdev->stats.rqt.cur;
323 rdev->stats.rqt.fail++;
324 mutex_unlock(&rdev->stats.lock);
328 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
331 mutex_lock(&rdev->stats.lock);
332 rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
333 mutex_unlock(&rdev->stats.lock);
334 vmem_xfree(rdev->rqt_arena, addr,
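The RQT pool scales the request by 64 (size << 6) before rounding, presumably the per-entry size of the hardware receive-queue table, and subtracts the same scaled, rounded size on free. A small helper sketch; both constants here are assumptions:

#include <stdio.h>

#define MIN_RQT_SHIFT 10    /* assumed; the real value is not in this listing */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Bytes charged against the rqt pool for a queue of 'entries' slots. */
static unsigned long rqt_bytes(unsigned long entries)
{
    return roundup(entries << 6, 1UL << MIN_RQT_SHIFT);
}

int main(void)
{
    printf("17 entries -> %lu bytes\n", rqt_bytes(17));
    return 0;
}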
338 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
340 rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
341 rdev->adap->vres.rq.start,
342 rdev->adap->vres.rq.size,
344 if (!rdev->rqt_arena)
350 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
352 vmem_destroy(rdev->rqt_arena);
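Both pools follow the same lifecycle: an arena is created over the adapter's vres range at attach, allocations are carved out of it and returned at runtime, and the arena is destroyed at detach. A compact userspace analogue of that shape; the pool_* helpers are hypothetical stand-ins, not the kernel vmem(9) interface used in the listing, and this toy version does not support reuse of freed space:

#include <stdio.h>
#include <stdlib.h>

struct pool { unsigned long base, size, next; };

static struct pool *pool_create(unsigned long base, unsigned long size)
{
    struct pool *p = malloc(sizeof(*p));

    if (p) { p->base = base; p->size = size; p->next = base; }
    return p;
}

/* Simplistic bump allocator; returns 0 on exhaustion (the range starts above 0). */
static unsigned long pool_alloc(struct pool *p, unsigned long len)
{
    unsigned long a = p->next;

    if (a + len > p->base + p->size)
        return 0;
    p->next = a + len;
    return a;
}

static void pool_destroy(struct pool *p) { free(p); }

int main(void)
{
    struct pool *pbl = pool_create(0x10000, 0x8000);    /* hypothetical range */
    unsigned long addr = pool_alloc(pbl, 0x400);

    printf("allocated PBL chunk at 0x%lx\n", addr);
    pool_destroy(pbl);
    return 0;
}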