• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/video/msm/
1/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
25#include <linux/major.h>
26#include <linux/slab.h>
27
28#include <mach/msm_iomap.h>
29#include <mach/msm_fb.h>
30#include <linux/platform_device.h>
31
32#include "mdp_hw.h"
33
/* sysfs class under which MDP devices live; clients attach to it via
 * register_mdp_client(), created in mdp_init() */
struct class *mdp_class;

/* base offset of the MDP command/debug register window */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* default color conversion coefficients: 9 matrix entries followed by
 * 3 offsets, loaded into the CCS registers by mdp_probe()
 * NOTE(review): values look like fixed-point YCbCr->RGB constants —
 * confirm against the MDP register documentation */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* waiters for DMA2 / PPP (blit) completion interrupts */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
/* one-shot callback invoked from the ISR when DMA2 completes */
static struct msmfb_callback *dma_callback;
/* MDP core clock; NULL until mdp_probe() acquires it */
static struct clk *clk;
/* currently-enabled MDP interrupt sources; guarded by mdp_lock */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
/* serializes blit (PPP) operations in mdp_blit() */
DEFINE_MUTEX(mdp_mutex);
50
51static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
52{
53	unsigned long irq_flags;
54	int ret = 0;
55
56	BUG_ON(!mask);
57
58	spin_lock_irqsave(&mdp_lock, irq_flags);
59	/* if the mask bits are already set return an error, this interrupt
60	 * is already enabled */
61	if (mdp_irq_mask & mask) {
62		printk(KERN_ERR "mdp irq already on already on %x %x\n",
63		       mdp_irq_mask, mask);
64		ret = -1;
65	}
66	/* if the mdp irq is not already enabled enable it */
67	if (!mdp_irq_mask) {
68		if (clk)
69			clk_enable(clk);
70		enable_irq(mdp->irq);
71	}
72
73	/* update the irq mask to reflect the fact that the interrupt is
74	 * enabled */
75	mdp_irq_mask |= mask;
76	spin_unlock_irqrestore(&mdp_lock, irq_flags);
77	return ret;
78}
79
80static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
81{
82	/* this interrupt is already disabled! */
83	if (!(mdp_irq_mask & mask)) {
84		printk(KERN_ERR "mdp irq already off %x %x\n",
85		       mdp_irq_mask, mask);
86		return -1;
87	}
88	/* update the irq mask to reflect the fact that the interrupt is
89	 * disabled */
90	mdp_irq_mask &= ~(mask);
91	/* if no one is waiting on the interrupt, disable it */
92	if (!mdp_irq_mask) {
93		disable_irq(mdp->irq);
94		if (clk)
95			clk_disable(clk);
96	}
97	return 0;
98}
99
100static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
101{
102	unsigned long irq_flags;
103	int ret;
104
105	spin_lock_irqsave(&mdp_lock, irq_flags);
106	ret = locked_disable_mdp_irq(mdp, mask);
107	spin_unlock_irqrestore(&mdp_lock, irq_flags);
108	return ret;
109}
110
/* MDP interrupt handler.  Reads and acknowledges the raw interrupt
 * status, then services only the sources currently enabled in
 * mdp_irq_mask:
 *  - DMA2 completion fires the registered dma_callback (one-shot) and
 *    wakes mdp_dma2_waitqueue;
 *  - PPP ROI-done wakes mdp_ppp_waitqueue.
 * Serviced sources are then disabled again — these interrupts are
 * re-enabled per operation via enable_mdp_irq().
 */
static irqreturn_t mdp_isr(int irq, void *data)
{
	uint32_t status;
	unsigned long irq_flags;
	struct mdp_info *mdp = data;

	spin_lock_irqsave(&mdp_lock, irq_flags);

	/* ack everything pending, including masked-off sources */
	status = mdp_readl(mdp, MDP_INTR_STATUS);
	mdp_writel(mdp, status, MDP_INTR_CLEAR);

	/* only act on sources someone actually enabled */
	status &= mdp_irq_mask;
	if (status & DL0_DMA2_TERM_DONE) {
		if (dma_callback) {
			dma_callback->func(dma_callback);
			dma_callback = NULL; /* one-shot */
		}
		wake_up(&mdp_dma2_waitqueue);
	}

	if (status & DL0_ROI_DONE)
		wake_up(&mdp_ppp_waitqueue);

	/* these are one-shot sources; turn them back off */
	if (status)
		locked_disable_mdp_irq(mdp, status);

	spin_unlock_irqrestore(&mdp_lock, irq_flags);
	return IRQ_HANDLED;
}
140
141static uint32_t mdp_check_mask(uint32_t mask)
142{
143	uint32_t ret;
144	unsigned long irq_flags;
145
146	spin_lock_irqsave(&mdp_lock, irq_flags);
147	ret = mdp_irq_mask & mask;
148	spin_unlock_irqrestore(&mdp_lock, irq_flags);
149	return ret;
150}
151
152static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
153{
154	int ret = 0;
155	unsigned long irq_flags;
156
157	wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
158
159	spin_lock_irqsave(&mdp_lock, irq_flags);
160	if (mdp_irq_mask & mask) {
161		locked_disable_mdp_irq(mdp, mask);
162		printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
163		       mask);
164		ret = -ETIMEDOUT;
165	}
166	spin_unlock_irqrestore(&mdp_lock, irq_flags);
167
168	return ret;
169}
170
171void mdp_dma_wait(struct mdp_device *mdp_dev)
172{
173#define MDP_MAX_TIMEOUTS 20
174	static int timeout_count;
175	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
176
177	if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
178		timeout_count++;
179	else
180		timeout_count = 0;
181
182	if (timeout_count > MDP_MAX_TIMEOUTS) {
183		printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
184		       MDP_MAX_TIMEOUTS);
185		BUG();
186	}
187}
188
/* Block until the current PPP (blit) operation signals ROI done;
 * returns 0 or -ETIMEDOUT from mdp_wait(). */
static int mdp_ppp_wait(struct mdp_info *mdp)
{
	return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
}
193
/* Program and fire a DMA2 transfer of an RGB565 framebuffer region to
 * the primary MDDI display, dithering down to 18bpp (666) on the way
 * out.  @callback, if non-NULL, is invoked from the ISR when the DMA
 * completes.  If the DMA-done interrupt is already enabled (a transfer
 * is in flight) the request is dropped with an error message.
 * NOTE(review): the register offsets below live in the command/debug
 * window (MDP_CMD_DEBUG_ACCESS_BASE) — confirm meanings against the
 * MDP register specification; the write order is preserved as-is.
 */
void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
		     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
		     struct msmfb_callback *callback)
{
	uint32_t dma2_cfg;
	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */

	/* refuse to start a second transfer while one is in flight */
	if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
		printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
		return;
	}

	dma_callback = callback;

	dma2_cfg = DMA_PACK_TIGHT |
		DMA_PACK_ALIGN_LSB |
		DMA_PACK_PATTERN_RGB |
		DMA_OUT_SEL_AHB |
		DMA_IBUF_NONCONTIGUOUS;

	dma2_cfg |= DMA_IBUF_FORMAT_RGB565;

	dma2_cfg |= DMA_OUT_SEL_MDDI;

	dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;

	dma2_cfg |= DMA_DITHER_EN;

	/* setup size, address, and stride */
	mdp_writel(mdp, (height << 16) | (width),
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
	mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
	mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);

	/* 666 18BPP */
	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;

	/* set y & x offset and MDDI transaction parameters */
	mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
	mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);

	/* commit the configuration word last, before the kick */
	mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);

	/* start DMA2 */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
}
242
243void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
244	     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
245	     struct msmfb_callback *callback, int interface)
246{
247	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
248
249	if (interface == MSM_MDDI_PMDH_INTERFACE) {
250		mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
251				callback);
252	}
253}
254
255int get_img(struct mdp_img *img, struct fb_info *info,
256	    unsigned long *start, unsigned long *len,
257	    struct file **filep)
258{
259	int put_needed, ret = 0;
260	struct file *file;
261
262	file = fget_light(img->memory_id, &put_needed);
263	if (file == NULL)
264		return -1;
265
266	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
267		*start = info->fix.smem_start;
268		*len = info->fix.smem_len;
269	} else
270		ret = -1;
271	fput_light(file, put_needed);
272
273	return ret;
274}
275
/* Counterpart to get_img().  Intentionally a no-op: get_img() drops
 * its file reference before returning, so there is nothing left to
 * release here. */
void put_img(struct file *src_file, struct file *dst_file)
{
}
279
/* Perform a blit through the PPP (pixel pipeline) engine.  Source and
 * destination must both live in framebuffer memory (see get_img()).
 * Serialized against concurrent blits with mdp_mutex; each hardware
 * pass enables the ROI-done interrupt and then waits for it.
 * Returns 0 on success or a negative errno.
 */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* reject degenerate source/destination rectangles */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
				"memory\n");
		return -EINVAL;
	}

	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
				"memory\n");
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* Hardware workaround: a blend (alpha/transparency) combined with
	 * a 90-degree rotation onto a narrow-but-tall destination is
	 * split into 16-pixel-high tiles blitted one at a time.
	 * NOTE(review): transp_mask was just forced to MDP_TRANSP_NOP, so
	 * the first sub-condition below can never be true — confirm
	 * whether that term is intentionally dead. */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		/* shrink the source width so it maps onto one 16-px tile */
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			/* NOTE(review): enable_mdp_irq() return value is
			 * ignored here; on failure the wait below would
			 * simply time out */
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			/* advance both rectangles to the next tile */
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* final partial tile (dst_rect.h is 16 at this point) */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	/* single pass: the whole (or remaining) rectangle in one blit */
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* blit never started, so the ROI-done irq must be taken down here;
	 * on a wait failure mdp_wait() already disabled it */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
362
363void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
364{
365	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
366
367	disp_id &= 0xf;
368	mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
369}
370
371int register_mdp_client(struct class_interface *cint)
372{
373	if (!mdp_class) {
374		pr_err("mdp: no mdp_class when registering mdp client\n");
375		return -ENODEV;
376	}
377	cint->class = mdp_class;
378	return class_interface_register(cint);
379}
380
381#include "mdp_csc_table.h"
382#include "mdp_scale_tables.h"
383
384int mdp_probe(struct platform_device *pdev)
385{
386	struct resource *resource;
387	int ret;
388	int n;
389	struct mdp_info *mdp;
390
391	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
392	if (!resource) {
393		pr_err("mdp: can not get mdp mem resource!\n");
394		return -ENOMEM;
395	}
396
397	mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
398	if (!mdp)
399		return -ENOMEM;
400
401	mdp->irq = platform_get_irq(pdev, 0);
402	if (mdp->irq < 0) {
403		pr_err("mdp: can not get mdp irq\n");
404		ret = mdp->irq;
405		goto error_get_irq;
406	}
407
408	mdp->base = ioremap(resource->start,
409			    resource->end - resource->start);
410	if (mdp->base == 0) {
411		printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
412		ret = -ENOMEM;
413		goto error_ioremap;
414	}
415
416	mdp->mdp_dev.dma = mdp_dma;
417	mdp->mdp_dev.dma_wait = mdp_dma_wait;
418	mdp->mdp_dev.blit = mdp_blit;
419	mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
420
421	clk = clk_get(&pdev->dev, "mdp_clk");
422	if (IS_ERR(clk)) {
423		printk(KERN_INFO "mdp: failed to get mdp clk");
424		return PTR_ERR(clk);
425	}
426
427	ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
428	if (ret)
429		goto error_request_irq;
430	disable_irq(mdp->irq);
431	mdp_irq_mask = 0;
432
433	/* debug interface write access */
434	mdp_writel(mdp, 1, 0x60);
435
436	mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
437	mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
438
439	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
440	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
441
442	for (n = 0; n < ARRAY_SIZE(csc_table); n++)
443		mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
444
445	/* clear up unused fg/main registers */
446	/* comp.plane 2&3 ystride */
447	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
448
449	/* unpacked pattern */
450	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
451	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
452	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
453	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
454	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
455	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
456	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
457	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
458	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
459
460	/* comp.plane 2 & 3 */
461	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
462	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
463
464	/* clear unused bg registers */
465	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
466	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
467	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
468	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
469	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
470
471	for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
472		mdp_writel(mdp, mdp_upscale_table[n].val,
473		       mdp_upscale_table[n].reg);
474
475	for (n = 0; n < 9; n++)
476		mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
477	mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
478	mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
479	mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
480
481	/* register mdp device */
482	mdp->mdp_dev.dev.parent = &pdev->dev;
483	mdp->mdp_dev.dev.class = mdp_class;
484
485	/* if you can remove the platform device you'd have to implement
486	 * this:
487	mdp_dev.release = mdp_class; */
488
489	ret = device_register(&mdp->mdp_dev.dev);
490	if (ret)
491		goto error_device_register;
492	return 0;
493
494error_device_register:
495	free_irq(mdp->irq, mdp);
496error_request_irq:
497	iounmap(mdp->base);
498error_get_irq:
499error_ioremap:
500	kfree(mdp);
501	return ret;
502}
503
/* Platform driver glue.  No remove callback is provided, so once
 * probed the MDP device cannot be cleanly unbound. */
static struct platform_driver msm_mdp_driver = {
	.probe = mdp_probe,
	.driver = {.name = "msm_mdp"},
};
508
509static int __init mdp_init(void)
510{
511	mdp_class = class_create(THIS_MODULE, "msm_mdp");
512	if (IS_ERR(mdp_class)) {
513		printk(KERN_ERR "Error creating mdp class\n");
514		return PTR_ERR(mdp_class);
515	}
516	return platform_driver_register(&msm_mdp_driver);
517}
518
519subsys_initcall(mdp_init);
520