/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/sched.h>

#include <asm/irq.h>
#include <mach/hardware.h>

#include "../w1.h"
#include "../w1_int.h"

#define	MOD_NAME	"OMAP_HDQ:"

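/* HDQ controller register offsets and bit definitions */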
#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

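/*
 * hdq_wait_queue is woken from the ISR on TXCOMPLETE/RXCOMPLETE/TIMEOUT;
 * w1_id is the module parameter reported as the slave id by the search
 * callback.
 */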
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct  mutex		hdq_mutex;
	int			hdq_usecount;
	struct	clk		*hdq_ick;
	struct	clk		*hdq_fck;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};

static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	omap_hdq_remove,
	.driver =	{
		.name =	"omap_hdq",
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type,	w1_slave_found_callback slave_found);


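/* 1-Wire bus operations registered with the w1 core */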
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readb(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writeb(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writeb(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}

/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ may not fully obey the 1-wire spec.
	 * So calculate CRC based on module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

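/* Soft-reset the HDQ block and restore HDQ mode, clock and interrupt enables */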
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * wait for both INIT and GO bits to return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

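/* Read one byte from the bus; the controller must already be enabled */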
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX. It
		 * triggers another interrupt before we
		 * sleep. So we have to wait for RXCOMPLETE bit.
		 */
		while (!(hdq_data->hdq_irqstatus
			& OMAP_HDQ_INT_STATUS_RXCOMPLETE)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;

}

/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			if (clk_enable(hdq_data->hdq_ick)) {
				dev_dbg(hdq_data->dev, "Can not enable ick\n");
				ret = -ENODEV;
				goto clk_err;
			}
			if (clk_enable(hdq_data->hdq_fck)) {
				dev_dbg(hdq_data->dev, "Can not enable fck\n");
				clk_disable(hdq_data->hdq_ick);
				ret = -ENODEV;
				goto clk_err;
			}

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}
	/* success: skip clk_err so the clock references taken at probe time are kept */
	goto out;

clk_err:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount) {
			clk_disable(hdq_data->hdq_ick);
			clk_disable(hdq_data->hdq_fck);
		}
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}

	return;
}

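/* Probe: map the HDQ registers, acquire clocks and the IRQ, then register the 1-Wire master */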
static int __devinit omap_hdq_probe(struct platform_device *pdev)
{
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_kmalloc;
	}

	hdq_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		ret = -ENXIO;
		goto err_resource;
	}

	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
	if (!hdq_data->hdq_base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	/* get interface & functional clock objects */
	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");

	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
		if (IS_ERR(hdq_data->hdq_ick)) {
			ret = PTR_ERR(hdq_data->hdq_ick);
			goto err_clk;
		}
		if (IS_ERR(hdq_data->hdq_fck)) {
			ret = PTR_ERR(hdq_data->hdq_fck);
			clk_put(hdq_data->hdq_ick);
			goto err_clk;
		}
	}

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	if (clk_enable(hdq_data->hdq_ick)) {
		dev_dbg(&pdev->dev, "Can not enable ick\n");
		ret = -ENODEV;
		goto err_intfclk;
	}

	if (clk_enable(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can not enable fck\n");
		ret = -ENODEV;
		goto err_fnclk;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	/* don't clock the HDQ until it is needed */
	clk_disable(hdq_data->hdq_ick);
	clk_disable(hdq_data->hdq_fck);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_w1:
err_irq:
	clk_disable(hdq_data->hdq_fck);

err_fnclk:
	clk_disable(hdq_data->hdq_ick);

err_intfclk:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);

err_clk:
	iounmap(hdq_data->hdq_base);

err_ioremap:
err_resource:
	platform_set_drvdata(pdev, NULL);
	kfree(hdq_data);

err_kmalloc:
	return ret;

}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
	free_irq(INT_24XX_HDQ_IRQ, hdq_data);
	platform_set_drvdata(pdev, NULL);
	iounmap(hdq_data->hdq_base);
	kfree(hdq_data);

	return 0;
}

static int __init
omap_hdq_init(void)
{
	return platform_driver_register(&omap_hdq_driver);
}
module_init(omap_hdq_init);

static void __exit
omap_hdq_exit(void)
{
	platform_driver_unregister(&omap_hdq_driver);
}
module_exit(omap_hdq_exit);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");