// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_ccb.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_free_list.h"
#include "pvr_fw.h"
#include "pvr_gem.h"
#include "pvr_power.h"

#include <drm/drm_managed.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
#define RESERVE_SLOT_MIN_RETRIES 10

static void
ccb_ctrl_init(void *cpu_ptr, void *priv)
{
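	/*
	 * Init callback invoked from pvr_fw_object_create_and_map() while the
	 * control structure is CPU-mapped.
	 */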
	struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
	struct pvr_ccb *pvr_ccb = priv;

	ctrl->write_offset = 0;
	ctrl->read_offset = 0;
	ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
	ctrl->cmd_size = pvr_ccb->cmd_size;
}

/**
 * pvr_ccb_init() - Initialise a CCB
 * @pvr_dev: Device pointer.
 * @pvr_ccb: Pointer to CCB structure to initialise.
 * @num_cmds_log2: Log2 of number of commands in this CCB.
 * @cmd_size: Command size for this CCB.
 *
 * Return:
 *  * Zero on success, or
 *  * Any error code returned by pvr_fw_object_create_and_map().
 */
static int
pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
	     u32 num_cmds_log2, size_t cmd_size)
{
	u32 num_cmds = 1 << num_cmds_log2;
	u32 ccb_size = num_cmds * cmd_size;
	int err;

	pvr_ccb->num_cmds = num_cmds;
	pvr_ccb->cmd_size = cmd_size;

	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
	if (err)
		return err;

	/*
	 * Map CCB and control structure as uncached, so we don't have to flush
	 * CPU cache repeatedly when polling for space.
	 */
	pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
	if (IS_ERR(pvr_ccb->ctrl))
		return PTR_ERR(pvr_ccb->ctrl);

	pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL, &pvr_ccb->ccb_obj);
	if (IS_ERR(pvr_ccb->ccb)) {
		err = PTR_ERR(pvr_ccb->ccb);
		goto err_free_ctrl;
	}

	pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
	pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);

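	/*
	 * Mirror ccb_ctrl_init() so the FW-visible control words start from a
	 * known, empty-CCB state.
	 */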
	WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
	WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);

	return 0;

err_free_ctrl:
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);

	return err;
}

/**
 * pvr_ccb_fini() - Release CCB structure
 * @pvr_ccb: CCB to release.
 */
void
pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
{
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
}

/**
 * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
 * @pvr_ccb: CCB to test.
 * @write_offset: Address to store the next write offset. May be %NULL.
 *
 * Caller must hold @pvr_ccb->lock.
 *
 * Return:
 *  * %true if a slot is available, or
 *  * %false if no slot is available.
 */
static __always_inline bool
pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
{
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);

	lockdep_assert_held(&pvr_ccb->lock);

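	/*
	 * One slot is always left unused: if advancing the write offset would
	 * make it equal to the read offset, the FW would see an empty CCB.
	 */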
	if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
		if (write_offset)
			*write_offset = next_write_offset;
		return true;
	}

	return false;
}

static void
process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
{
	switch (cmd->cmd_type) {
	case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
		pvr_power_reset(pvr_dev, false);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
		pvr_free_list_process_reconstruct_req(pvr_dev,
						      &cmd->cmd_data.cmd_freelists_reconstruction);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
		pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
		break;

	default:
		drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
			 cmd->cmd_type);
		break;
	}
}

/**
 * pvr_fwccb_process() - Process any pending FWCCB commands
 * @pvr_dev: Target PowerVR device
 */
void pvr_fwccb_process(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
	u32 read_offset;

	mutex_lock(&pvr_dev->fwccb.lock);

	while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
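		/*
		 * Copy the command out of the ring so the slot can be handed
		 * back to the FW before the handler runs without the lock.
		 */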
		struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];

		WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));

		/* Drop FWCCB lock while we process command. */
		mutex_unlock(&pvr_dev->fwccb.lock);

		process_fwccb_command(pvr_dev, &cmd);

		mutex_lock(&pvr_dev->fwccb.lock);
	}

	mutex_unlock(&pvr_dev->fwccb.lock);
}

/**
 * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
 * @pvr_dev: Target PowerVR device
 *
 * Return:
 *  * The maximum number of usable slots.
 */
static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
{
	/* Capacity is the number of slots minus one, to cope with the wrapping
	 * mechanism. If we were to use all slots, we could end up with
	 * read_offset == write_offset, which the FW considers a KCCB-is-empty
	 * condition.
	 */
	return pvr_dev->kccb.slot_count - 1;
}

/**
 * pvr_kccb_used_slot_count_locked() - Get the number of used slots
 * @pvr_dev: Device pointer.
 *
 * KCCB lock must be held.
 *
 * Return:
 *  * The number of slots currently used.
 */
static u32
pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
{
	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 wr_offset = READ_ONCE(ctrl->write_offset);
	u32 rd_offset = READ_ONCE(ctrl->read_offset);
	u32 used_count;

	lockdep_assert_held(&pvr_ccb->lock);

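	/* The write offset may have wrapped around before the read offset. */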
	if (wr_offset >= rd_offset)
		used_count = wr_offset - rd_offset;
	else
		used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;

	return used_count;
}

/**
 * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
 * held and a slot pre-reserved
 * @pvr_dev: Device pointer.
 * @cmd: Command to send.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 */
void
pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
				   struct rogue_fwif_kccb_cmd *cmd,
				   u32 *kccb_slot)
{
	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
	struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 old_write_offset;
	u32 new_write_offset;

	WARN_ON(pvr_dev->lost);

	mutex_lock(&pvr_ccb->lock);

	if (WARN_ON(!pvr_dev->kccb.reserved_count))
		goto out_unlock;

	old_write_offset = READ_ONCE(ctrl->write_offset);

	/* We reserved the slot, we should have one available. */
	if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
		goto out_unlock;

	memcpy(&kccb[old_write_offset], cmd,
	       sizeof(struct rogue_fwif_kccb_cmd));
	if (kccb_slot) {
		*kccb_slot = old_write_offset;
		/* Clear return status for this slot. */
		WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
			   ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
	}
	mb(); /* Make the command visible before the write offset update. */
	WRITE_ONCE(ctrl->write_offset, new_write_offset);
	pvr_dev->kccb.reserved_count--;

	/* Kick MTS */
	pvr_fw_mts_schedule(pvr_dev,
			    PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);

out_unlock:
	mutex_unlock(&pvr_ccb->lock);
}

/**
 * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
 * @pvr_dev: Device pointer.
 *
 * Return:
 *  * %true if a KCCB slot was reserved, or
 *  * %false otherwise.
 */
static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
{
	bool reserved = false;
	u32 used_count;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
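	/*
	 * Only reserve if a slot remains once both in-flight commands and
	 * slots already promised to other reservations are accounted for.
	 */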
	if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
		pvr_dev->kccb.reserved_count++;
		reserved = true;
	}

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return reserved;
}

/**
 * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
 * @pvr_dev: Device pointer.
 *
 * Return:
 *  * 0 on success, or
 *  * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
 *    %RESERVE_SLOT_MIN_RETRIES retries.
 */
static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
{
	unsigned long start_timestamp = jiffies;
	bool reserved = false;
	u32 retries = 0;

	while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT ||
	       retries < RESERVE_SLOT_MIN_RETRIES) {
		reserved = pvr_kccb_try_reserve_slot(pvr_dev);
		if (reserved)
			break;

		usleep_range(1, 50);

		if (retries < U32_MAX)
			retries++;
	}

	return reserved ? 0 : -EBUSY;
}

/**
 * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
 * @pvr_dev: Device pointer.
 * @cmd: Command to send.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 *
 * Return:
 *  * Zero on success, or
 *  * -EBUSY if we time out waiting for a free KCCB slot.
 */
int
pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
			  u32 *kccb_slot)
{
	int err;

	err = pvr_kccb_reserve_slot_sync(pvr_dev);
	if (err)
		return err;

	pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
	return 0;
}

/**
 * pvr_kccb_send_cmd() - Send command to the KCCB
 * @pvr_dev: Device pointer.
 * @cmd: Command to send.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 *
 * Return:
 *  * Zero on success, or
 *  * -EBUSY if we time out waiting for a free KCCB slot.
 */
int
pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
		  u32 *kccb_slot)
{
	int err;

	err = pvr_power_get(pvr_dev);
	if (err)
		return err;

	err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);

	pvr_power_put(pvr_dev);

	return err;
}

/**
 * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
 * @pvr_dev: Device pointer.
 * @slot_nr: KCCB slot to wait on.
 * @timeout: Timeout length (in jiffies).
 * @rtn_out: Location to store KCCB command result. May be %NULL.
 *
 * Return:
 *  * Zero on success, or
 *  * -ETIMEDOUT on timeout.
 */
int
pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
			     u32 timeout, u32 *rtn_out)
{
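	/*
	 * The FW updates the slot's return entry; wait until it carries the
	 * ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED flag.
	 */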
	int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
				     ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);

	if (ret && rtn_out)
		*rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);

	return ret ? 0 : -ETIMEDOUT;
}

/**
 * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
 * @pvr_dev: Device pointer
 *
 * Return:
 *  * %true if the KCCB is idle (contains no commands), or
 *  * %false if the KCCB contains pending commands.
 */
bool
pvr_kccb_is_idle(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
	bool idle;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return idle;
}

static const char *
pvr_kccb_fence_get_driver_name(struct dma_fence *f)
{
	return PVR_DRIVER_NAME;
}

static const char *
pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
{
	return "kccb";
}

static const struct dma_fence_ops pvr_kccb_fence_ops = {
	.get_driver_name = pvr_kccb_fence_get_driver_name,
	.get_timeline_name = pvr_kccb_fence_get_timeline_name,
};

/**
 * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
 */
struct pvr_kccb_fence {
	/** @base: Base dma_fence object. */
	struct dma_fence base;

	/** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
	struct list_head node;
};

/**
 * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
 * @pvr_dev: Target PowerVR device
 *
 * Signal as many KCCB fences as we have slots available.
 */
void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
{
	struct pvr_kccb_fence *fence, *tmp_fence;
	u32 used_count, available_count;

	/* Wake up those waiting for KCCB slot execution. */
	wake_up_all(&pvr_dev->kccb.rtn_q);

	/* Then iterate over all KCCB fences and signal as many as we can. */
	mutex_lock(&pvr_dev->kccb.ccb.lock);
	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);

	if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
		goto out_unlock;

	available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
	list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
		if (!available_count)
			break;

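		/*
		 * Each signalled waiter owns a reserved slot until it either
		 * sends its command or releases the slot.
		 */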
		list_del(&fence->node);
		pvr_dev->kccb.reserved_count++;
		available_count--;
		dma_fence_signal(&fence->base);
		dma_fence_put(&fence->base);
	}

out_unlock:
	mutex_unlock(&pvr_dev->kccb.ccb.lock);
}

/**
 * pvr_kccb_fini() - Cleanup device KCCB
 * @pvr_dev: Target PowerVR device
 */
void pvr_kccb_fini(struct pvr_device *pvr_dev)
{
	pvr_ccb_fini(&pvr_dev->kccb.ccb);
	WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
	WARN_ON(pvr_dev->kccb.reserved_count);
}

/**
 * pvr_kccb_init() - Initialise device KCCB
 * @pvr_dev: Target PowerVR device
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_ccb_init().
 */
int
pvr_kccb_init(struct pvr_device *pvr_dev)
{
	pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
	INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
	pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
	spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);

	return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
			    ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
			    sizeof(struct rogue_fwif_kccb_cmd));
}

/**
 * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
 *
 * Return:
 *  * NULL if the allocation fails, or
 *  * A valid dma_fence pointer otherwise.
 */
struct dma_fence *pvr_kccb_fence_alloc(void)
{
	struct pvr_kccb_fence *kccb_fence;

	kccb_fence = kzalloc(sizeof(*kccb_fence), GFP_KERNEL);
	if (!kccb_fence)
		return NULL;

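	/*
	 * The fence is initialised later, by pvr_kccb_reserve_slot(), and only
	 * if the caller actually has to wait for a slot.
	 */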
	return &kccb_fence->base;
}

/**
 * pvr_kccb_fence_put() - Drop a KCCB fence reference
 * @fence: The fence to drop the reference on.
 *
 * If the fence hasn't been initialized yet, dma_fence_free() is called. This
 * way we have a single function taking care of both cases.
 */
void pvr_kccb_fence_put(struct dma_fence *fence)
{
	if (!fence)
		return;

	if (!fence->ops) {
		dma_fence_free(fence);
	} else {
		WARN_ON(fence->ops != &pvr_kccb_fence_ops);
		dma_fence_put(fence);
	}
}

/**
 * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
 * @pvr_dev: Target PowerVR device
 * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
 *
 * Try to reserve a KCCB slot. If no slot is available, initialise the fence
 * object and queue it to the waiters list.
 *
 * If NULL is returned, a slot was reserved directly. In that case @f is freed
 * and must not be accessed after this call.
 *
 * Return:
 *  * NULL if a slot was available directly, or
 *  * A valid dma_fence object to wait on if no slot was available.
 */
struct dma_fence *
pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
{
	struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
	struct dma_fence *out_fence = NULL;
	u32 used_count;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
	if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
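		/*
		 * No slot available: arm the pre-allocated fence and queue it
		 * so pvr_kccb_wake_up_waiters() can signal it once a slot
		 * frees up.
		 */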
		dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
			       &pvr_dev->kccb.fence_ctx.lock,
			       pvr_dev->kccb.fence_ctx.id,
			       atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
		out_fence = dma_fence_get(&fence->base);
		list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
	} else {
		pvr_kccb_fence_put(f);
		pvr_dev->kccb.reserved_count++;
	}

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return out_fence;
}

/**
 * pvr_kccb_release_slot() - Release a KCCB slot reserved with
 * pvr_kccb_reserve_slot()
 * @pvr_dev: Target PowerVR device
 *
 * Should only be called if something failed after the
 * pvr_kccb_reserve_slot() call and the reserved slot will not be consumed
 * by pvr_kccb_send_cmd_reserved_powered().
 */
void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
{
	mutex_lock(&pvr_dev->kccb.ccb.lock);
	if (!WARN_ON(!pvr_dev->kccb.reserved_count))
		pvr_dev->kccb.reserved_count--;
	mutex_unlock(&pvr_dev->kccb.ccb.lock);
}

/**
 * pvr_fwccb_init() - Initialise device FWCCB
 * @pvr_dev: Target PowerVR device
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_ccb_init().
 */
int
pvr_fwccb_init(struct pvr_device *pvr_dev)
{
	return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
			    ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
			    sizeof(struct rogue_fwif_fwccb_cmd));
}