Lines Matching defs:dmm
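
The numbered fragments below appear to come from the OMAP DRM DMM/TILER driver (omap_dmm_tiler.c). Short reconstructed sketches are interleaved after each group of fragments; they are illustrative only, assume the driver's own struct dmm / struct refill_engine definitions plus the usual kernel headers (<linux/dmaengine.h>, <linux/dma-mapping.h>, <linux/io.h>, <linux/spinlock.h>), and any identifier not quoted in the numbered lines is an assumption rather than verbatim driver code.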

32 #define DMM_DRIVER_NAME "dmm"
36 static struct dmm *omap_dmm;
76 static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
82 tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
84 dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
90 dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
94 status = dma_sync_wait(dmm->wa_dma_chan, cookie);
96 dev_err(dmm->dev, "i878 wa DMA copy failure\n");
98 dmaengine_terminate_all(dmm->wa_dma_chan);
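
Lines 76–98 outline a one-word copy through the system DMA engine, used to work around the i878 erratum by avoiding direct CPU accesses to the DMM registers. A minimal sketch assembling those calls (return codes and local names are assumptions):

static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
        struct dma_async_tx_descriptor *tx;
        enum dma_status status;
        dma_cookie_t cookie;

        /* single 4-byte memcpy descriptor on the workaround channel */
        tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
        if (!tx) {
                dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
                return -EFAULT;
        }

        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
                return -EIO;
        }

        /* busy-wait until the engine reports the cookie complete */
        status = dma_sync_wait(dmm->wa_dma_chan, cookie);
        if (status != DMA_COMPLETE)
                dev_err(dmm->dev, "i878 wa DMA copy failure\n");

        dmaengine_terminate_all(dmm->wa_dma_chan);

        return status == DMA_COMPLETE ? 0 : -EIO;
}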
102 static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
107 src = dmm->phys_base + reg;
108 dst = dmm->wa_dma_handle;
110 r = dmm_dma_copy(dmm, src, dst);
112 dev_err(dmm->dev, "sDMA read transfer timeout\n");
113 return readl(dmm->base + reg);
122 return readl(dmm->wa_dma_data);
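
Lines 102–122 read a register by DMA-copying it into a coherent scratch word; if the copy fails, the driver falls back to a plain CPU read. Sketch:

static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
{
        dma_addr_t src, dst;
        int r;

        src = dmm->phys_base + reg;     /* physical address of the register */
        dst = dmm->wa_dma_handle;       /* coherent scratch word */

        r = dmm_dma_copy(dmm, src, dst);
        if (r) {
                dev_err(dmm->dev, "sDMA read transfer timeout\n");
                return readl(dmm->base + reg);  /* fallback: direct read */
        }

        /* the DMA engine deposited the register value in the scratch word */
        return readl(dmm->wa_dma_data);
}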
125 static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
130 writel(val, dmm->wa_dma_data);
139 src = dmm->wa_dma_handle;
140 dst = dmm->phys_base + reg;
142 r = dmm_dma_copy(dmm, src, dst);
144 dev_err(dmm->dev, "sDMA write transfer timeout\n");
145 writel(val, dmm->base + reg);
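
Lines 125–145 are the mirror image: stage the value in the scratch word, DMA it into the register, and fall back to writel() on failure. Sketch:

static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
{
        dma_addr_t src, dst;
        int r;

        writel(val, dmm->wa_dma_data);  /* stage the value in coherent memory */

        src = dmm->wa_dma_handle;
        dst = dmm->phys_base + reg;

        r = dmm_dma_copy(dmm, src, dst);
        if (r) {
                dev_err(dmm->dev, "sDMA write transfer timeout\n");
                writel(val, dmm->base + reg);   /* fallback: direct write */
        }
}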
149 static u32 dmm_read(struct dmm *dmm, u32 reg)
151 if (dmm->dmm_workaround) {
155 spin_lock_irqsave(&dmm->wa_lock, flags);
156 v = dmm_read_wa(dmm, reg);
157 spin_unlock_irqrestore(&dmm->wa_lock, flags);
161 return readl(dmm->base + reg);
165 static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
167 if (dmm->dmm_workaround) {
170 spin_lock_irqsave(&dmm->wa_lock, flags);
171 dmm_write_wa(dmm, val, reg);
172 spin_unlock_irqrestore(&dmm->wa_lock, flags);
174 writel(val, dmm->base + reg);
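
Lines 149–174 hide the two paths behind plain accessors: when dmm_workaround is set, the indirect path runs under wa_lock (the scratch word and DMA channel are shared), otherwise a normal readl()/writel() is used. Sketch:

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
        if (dmm->dmm_workaround) {
                u32 v;
                unsigned long flags;

                spin_lock_irqsave(&dmm->wa_lock, flags);
                v = dmm_read_wa(dmm, reg);
                spin_unlock_irqrestore(&dmm->wa_lock, flags);

                return v;
        }

        return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
        if (dmm->dmm_workaround) {
                unsigned long flags;

                spin_lock_irqsave(&dmm->wa_lock, flags);
                dmm_write_wa(dmm, val, reg);
                spin_unlock_irqrestore(&dmm->wa_lock, flags);
        } else {
                writel(val, dmm->base + reg);
        }
}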
178 static int dmm_workaround_init(struct dmm *dmm)
182 spin_lock_init(&dmm->wa_lock);
184 dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
185 &dmm->wa_dma_handle, GFP_KERNEL);
186 if (!dmm->wa_dma_data)
192 dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
193 if (!dmm->wa_dma_chan) {
194 dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
201 static void dmm_workaround_uninit(struct dmm *dmm)
203 dma_release_channel(dmm->wa_dma_chan);
205 dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
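
Lines 178–205 set up and tear down the workaround resources: one coherent 32-bit scratch word and any memcpy-capable dmaengine channel. Sketch (error codes are assumptions):

static int dmm_workaround_init(struct dmm *dmm)
{
        dma_cap_mask_t mask;

        spin_lock_init(&dmm->wa_lock);

        dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
                                              &dmm->wa_dma_handle, GFP_KERNEL);
        if (!dmm->wa_dma_data)
                return -ENOMEM;

        /* any channel that can do plain memory-to-memory copies will do */
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
        if (!dmm->wa_dma_chan) {
                dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
                return -ENODEV;
        }

        return 0;
}

static void dmm_workaround_uninit(struct dmm *dmm)
{
        dma_release_channel(dmm->wa_dma_chan);
        dma_free_coherent(dmm->dev, 4, dmm->wa_dma_data, dmm->wa_dma_handle);
}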
214 /* dmm programming requires 16 byte aligned addresses */
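
The comment at 214 appears to sit in the transaction's descriptor-memory allocator: PAT programming needs 16-byte-aligned addresses, so the allocator rounds its running cursor up before handing out memory. Roughly (the current_pa/current_va field names are assumptions):

        /* dmm programming requires 16 byte aligned addresses */
        txn->current_pa = round_up(txn->current_pa, 16);
        txn->current_va = (void *)round_up((long)txn->current_va, 16);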
232 struct dmm *dmm = engine->dmm;
237 r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
240 dev_err(dmm->dev,
250 dev_err(dmm->dev,
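
Lines 232–250 belong to a helper that polls the engine's PAT_STATUS register until a wait mask is satisfied, reporting either a hardware error flag or a timeout. Hedged sketch; the function name, mask macro, retry budget and message text beyond the quoted fragments are assumptions:

static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
        struct dmm *dmm = engine->dmm;
        u32 r, i = 1000;                        /* retry budget: assumption */

        while (true) {
                r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);

                if (r & DMM_PATSTATUS_ERR) {    /* error flag: assumption */
                        dev_err(dmm->dev,
                                "status error (engine%d): 0x%08x\n",
                                engine->id, r);
                        return -EFAULT;
                }

                if ((r & wait_mask) == wait_mask)
                        return 0;

                if (--i == 0) {
                        dev_err(dmm->dev,
                                "status timeout (engine%d): 0x%08x\n",
                                engine->id, r);
                        return -ETIMEDOUT;
                }

                udelay(1);
        }
}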
276 struct dmm *dmm = arg;
277 u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
281 dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
283 for (i = 0; i < dmm->num_engines; i++) {
285 dev_err(dmm->dev,
290 if (dmm->engines[i].async)
291 release_engine(&dmm->engines[i]);
293 complete(&dmm->engines[i].compl);
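
Lines 276–293 are the PAT interrupt handler: read and acknowledge DMM_PAT_IRQSTATUS, then walk one status byte per refill engine, logging errors and completing (or releasing) engines whose refill finished. Sketch; the per-engine bit masks are assumptions:

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
        struct dmm *dmm = arg;
        u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
        int i;

        /* acknowledge everything we just latched */
        dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

        for (i = 0; i < dmm->num_engines; i++) {
                if (status & DMM_IRQSTAT_ERR_MASK)      /* mask name: assumption */
                        dev_err(dmm->dev,
                                "irq error(engine%d): IRQSTAT 0x%02x\n",
                                i, status & 0xff);

                if (status & DMM_IRQSTAT_LST) {         /* "last" bit: assumption */
                        if (dmm->engines[i].async)
                                release_engine(&dmm->engines[i]);

                        complete(&dmm->engines[i].compl);
                }

                status >>= 8;                           /* next engine's byte */
        }

        return IRQ_HANDLED;
}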
305 static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
321 if (!list_empty(&dmm->idle_head)) {
322 engine = list_entry(dmm->idle_head.next, struct refill_engine,
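
dmm_txn_init (305–322) starts a refill transaction by claiming an idle engine from dmm->idle_head. A hedged fragment of that step; the lock and list-member names are assumptions:

        struct refill_engine *engine = NULL;
        unsigned long flags;

        /* claim an idle refill engine, if one is available */
        spin_lock_irqsave(&list_lock, flags);
        if (!list_empty(&dmm->idle_head)) {
                engine = list_entry(dmm->idle_head.next, struct refill_engine,
                                    idle_node);
                list_del(&engine->idle_node);
        }
        spin_unlock_irqrestore(&list_lock, flags);

        if (!engine)
                return NULL;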
380 page_to_phys(pages[n]) : engine->dmm->dummy_pa;
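
Line 380 is the core of the refill loop: every PAT slot gets either the physical address of a real page or the driver's preallocated dummy page, so unprogrammed slots still point at valid memory. Roughly (the data[] array, roll handling and loop bounds are assumptions):

        for (i = 0; i < npages; i++) {
                int n = i + roll;       /* optional rolling offset */

                if (n >= npages)
                        n -= npages;

                /* unused slots fall back to the dummy page */
                data[i] = (pages && pages[n]) ?
                        page_to_phys(pages[n]) : engine->dmm->dummy_pa;
        }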
395 struct dmm *dmm = engine->dmm;
398 dev_err(engine->dmm->dev, "need at least one txn\n");
417 dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
433 dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
438 dev_err(dmm->dev, "timed out waiting for done\n");
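
dmm_txn_commit (395–438) kicks the hardware: it rejects an empty transaction, clears the per-engine descriptor register, programs it with the DMA address of the refill descriptor, and then waits for the completion signalled by the IRQ handler. Hedged outline; the signature, the wait flag and the timeout value are assumptions:

static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
        struct refill_engine *engine = txn->engine_handle;      /* assumption */
        struct dmm *dmm = engine->dmm;
        int ret = 0;

        if (!txn->last_pat) {                                   /* assumption */
                dev_err(engine->dmm->dev, "need at least one txn\n");
                ret = -EINVAL;
                goto out;
        }

        /* clear the descriptor register before programming the new chain */
        dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

        /* point the engine at the DMA address of the refill descriptor */
        dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

        if (wait && !wait_for_completion_timeout(&engine->compl,
                                                 msecs_to_jiffies(100))) {
                dev_err(dmm->dev, "timed out waiting for done\n");
                ret = -ETIMEDOUT;
        }

out:
        return ret;
}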
809 dev_err(&dev->dev, "failed to get dmm base address\n");
895 omap_dmm->engines[i].dmm = omap_dmm;
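
In probe (809, 895) the register window is mapped and every refill engine gets a back-pointer to the owning struct dmm. Condensed, hedged fragments of those two steps (mem stands for the platform MMIO resource):

        /* map the DMM register space; phys_base is also kept for the i878 path */
        omap_dmm->phys_base = mem->start;
        omap_dmm->base = devm_ioremap(&dev->dev, mem->start, resource_size(mem));
        if (!omap_dmm->base) {
                dev_err(&dev->dev, "failed to get dmm base address\n");
                goto fail;
        }

        /* later in probe: each engine needs a way back to the device */
        for (i = 0; i < omap_dmm->num_engines; i++)
                omap_dmm->engines[i].dmm = omap_dmm;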
1080 /* early return if dmm/tiler device is not initialized */
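
The comment at 1080 guards a consumer path against running before probe has initialized the driver; presumably nothing more than:

        /* early return if dmm/tiler device is not initialized */
        if (!omap_dmm)
                return 0;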
1200 .compatible = "ti,omap4-dmm",
1204 .compatible = "ti,omap5-dmm",
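
Lines 1200–1204 are the two device-tree compatible entries; the full match table would look roughly like this (the table name is an assumption):

static const struct of_device_id dmm_of_match[] = {
        {
                .compatible = "ti,omap4-dmm",
        },
        {
                .compatible = "ti,omap5-dmm",
        },
        {},
};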