Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/w1/masters/

Lines matching defs:hdq_data. The leading number on each match is its line number in the original source file; the code is the OMAP HDQ 1-wire bus master driver.

58 struct hdq_data {
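Line 58 opens the driver's central state structure; the match list shows only the opening brace. A hedged reconstruction of the full definition, with the field list inferred from how each field is used in the matches below (every field named here does appear in a later match):

struct hdq_data {
        /* reconstruction: field list inferred, not present in the match output */
        struct device   *dev;           /* for dev_dbg()/dev_info() */
        void __iomem    *hdq_base;      /* ioremap'd register window */
        struct mutex    hdq_mutex;      /* serializes use count and transaction state */
        int             hdq_usecount;   /* clock/module reference count */
        struct clk      *hdq_ick;       /* interface clock */
        struct clk      *hdq_fck;       /* functional clock */
        u8              hdq_irqstatus;  /* latest INT_STATUS, written by the ISR */
        spinlock_t      hdq_spinlock;   /* protects hdq_irqstatus */
        int             init_trans;     /* counts bytes of the current write-then-read
                                         * transaction so the w1 callbacks know when
                                         * to call omap_hdq_get()/omap_hdq_put() */
};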
103 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
105 return __raw_readb(hdq_data->hdq_base + offset);
108 static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
110 __raw_writeb(val, hdq_data->hdq_base + offset);
113 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
116 u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
118 __raw_writeb(new_val, hdq_data->hdq_base + offset);
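Lines 103 to 118 are the three MMIO accessors the rest of the driver is built on. Completing their truncated bodies (the continuation lines, such as hdq_reg_merge's mask parameter and return, are inferred from the matches themselves):

static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
        return __raw_readb(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
        __raw_writeb(val, hdq_data->hdq_base + offset);
}

/* read-modify-write: change only the bits selected by mask */
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
                        u8 val, u8 mask)
{
        u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
                        | (val & mask);
        __raw_writeb(new_val, hdq_data->hdq_base + offset);

        return new_val;
}

Byte-wide __raw accessors fit the block's 8-bit registers and skip endian conversion, which is harmless for single-byte MMIO.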
129 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
137 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
145 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
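hdq_wait_for_flag (lines 129 to 145) busy-polls a register until a flag condition holds or a jiffies deadline passes; the two while loops in the matches are its set and clear branches. A hedged sketch of the whole helper, assuming the stock linux-2.6.36 driver's OMAP_HDQ_FLAG_SET/OMAP_HDQ_FLAG_CLEAR selectors and OMAP_HDQ_TIMEOUT deadline:

static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
                u8 flag, u8 flag_set, u8 *status)
{
        int ret = 0;
        unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

        if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
                /* wait for the flag bits to clear */
                while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
                        && time_before(jiffies, timeout))
                        schedule_timeout_uninterruptible(1);
                if (*status & flag)
                        ret = -ETIMEDOUT;
        } else if (flag_set == OMAP_HDQ_FLAG_SET) {
                /* wait for the flag bits to be set */
                while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
                        && time_before(jiffies, timeout))
                        schedule_timeout_uninterruptible(1);
                if (!(*status & flag))
                        ret = -ETIMEDOUT;
        } else {
                return -EINVAL;
        }

        return ret;
}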
158 static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
166 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
168 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
170 hdq_data->hdq_irqstatus = 0;
171 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
173 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
176 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
180 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
182 dev_dbg(hdq_data->dev, "TX wait elapsed\n");
186 *status = hdq_data->hdq_irqstatus;
189 dev_dbg(hdq_data->dev, "timeout waiting for"
196 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
200 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
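hdq_write_byte (lines 158 to 200) is the TX sequence: clear the latched interrupt state under the spinlock, load TX_DATA, set the GO bit, sleep until the ISR posts TXCOMPLETE, then wait for GO to drop. A hedged sketch of that flow; hdq_wait_queue (a file-scope DECLARE_WAIT_QUEUE_HEAD that never touches hdq_data and so is absent from this match list) and the TXCOMPLETE check are taken from the stock driver:

static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
        int ret;
        u8 tmp_status;
        unsigned long irqflags;

        *status = 0;

        /* reading INT_STATUS clears it; zero the cached copy for the ISR */
        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

        /* direction = write, then start the transfer with GO */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);

        /* the ISR wakes us once INT_STATUS becomes non-zero (inferred) */
        ret = wait_event_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret == 0) {
                dev_dbg(hdq_data->dev, "TX wait elapsed\n");
                goto out;
        }

        *status = hdq_data->hdq_irqstatus;
        if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
                dev_dbg(hdq_data->dev, "timeout waiting for TXCOMPLETE, %x",
                        *status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* the controller drops GO once the byte is out on the wire */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting GO bit to clear, %x",
                        tmp_status);
out:
        return ret;
}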
211 struct hdq_data *hdq_data = _hdq;
214 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
215 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
216 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
217 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
219 if (hdq_data->hdq_irqstatus &
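The interrupt handler (lines 211 to 219) does only two things: latch INT_STATUS into hdq_irqstatus under the spinlock, and wake whoever is sleeping in hdq_write_byte() or omap_hdq_break(). Sketch; the wake_up() on hdq_wait_queue and the three status bits in the mask are inferred from the stock driver:

static irqreturn_t hdq_isr(int irq, void *_hdq)
{
        struct hdq_data *hdq_data = _hdq;
        unsigned long irqflags;

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
        dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

        if (hdq_data->hdq_irqstatus &
            (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE |
             OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                /* inferred: wake a sleeping TX/break waiter */
                wake_up(&hdq_wait_queue);
        }

        return IRQ_HANDLED;
}

Keeping the ISR this thin means all waiting and error handling stays in process context, where it can sleep.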
257 static int _omap_hdq_reset(struct hdq_data *hdq_data)
262 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
269 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
274 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
277 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
280 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
283 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
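_omap_hdq_reset (lines 257 to 283) soft-resets and reprograms the block: write SOFTRESET to SYSCONFIG, enable clocks and interrupts in CTRL_STATUS, wait for RESETDONE in SYSSTATUS, then restore CTRL_STATUS and set AUTOIDLE. A hedged sketch; the CLOCKENABLE/INTERRUPTMASK/AUTOIDLE constants are from the stock driver:

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
        int ret;
        u8 tmp_status;

        hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
        /* enable the clock and the interrupt while the block resets */
        hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
                OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
        if (ret) {
                dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
                        tmp_status);
        } else {
                hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                        OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
                hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                        OMAP_HDQ_SYSCONFIG_AUTOIDLE);
        }

        return ret;
}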
291 static int omap_hdq_break(struct hdq_data *hdq_data)
297 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
299 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
304 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
306 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
308 hdq_data->hdq_irqstatus = 0;
309 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
312 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
319 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
321 dev_dbg(hdq_data->dev, "break wait elapsed\n");
326 tmp_status = hdq_data->hdq_irqstatus;
329 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
338 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
343 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
347 mutex_unlock(&hdq_data->hdq_mutex);
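omap_hdq_break (lines 291 to 347) issues the 1-wire break (bus reset) pulse. Its shape mirrors hdq_write_byte: clear interrupt state, set INIT and GO, sleep until the ISR posts the TIMEOUT status bit (which, for a break, signals completion), then wait for INIT and GO to clear. Hedged sketch, under the same hdq_wait_queue assumption as above:

static int omap_hdq_break(struct hdq_data *hdq_data)
{
        int ret;
        u8 tmp_status;
        unsigned long irqflags;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                return -EINTR;
        }

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);      /* read clears the flags */
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        /* set the INIT and GO bits to drive the break pulse */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
                OMAP_HDQ_CTRL_STATUS_GO);

        ret = wait_event_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret == 0) {
                dev_dbg(hdq_data->dev, "break wait elapsed\n");
                ret = -EINTR;
                goto out;
        }

        tmp_status = hdq_data->hdq_irqstatus;
        if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
                        tmp_status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* both INIT and GO should return to zero almost immediately */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_FLAG_CLEAR, &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits to clear, %x",
                        tmp_status);
out:
        mutex_unlock(&hdq_data->hdq_mutex);
        return ret;
}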
352 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
358 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
364 if (!hdq_data->hdq_usecount) {
369 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
370 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
378 while (!(hdq_data->hdq_irqstatus
383 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
385 status = hdq_data->hdq_irqstatus;
388 dev_dbg(hdq_data->dev, "timeout waiting for"
395 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
397 mutex_unlock(&hdq_data->hdq_mutex);
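hdq_read_byte (lines 352 to 397) is the RX path. Per the stock driver's comment, the RX interrupt can fire before the caller gets to sleep, so instead of the wait queue it polls the ISR-maintained hdq_irqstatus for RXCOMPLETE. Hedged sketch:

static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
        int ret;
        u8 status;
        unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        if (!hdq_data->hdq_usecount) {
                ret = -EINVAL;  /* clocks are off: caller skipped omap_hdq_get() */
                goto out;
        }

        if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                /* direction = read, GO: start clocking a byte in */
                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
                /* RX completes quickly, so poll the cached irq status */
                while (!(hdq_data->hdq_irqstatus
                        & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
                        && time_before(jiffies, timeout))
                        schedule_timeout_uninterruptible(1);
                /* flip the direction bit back to write */
                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
                        OMAP_HDQ_CTRL_STATUS_DIR);
                status = hdq_data->hdq_irqstatus;
                if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                        dev_dbg(hdq_data->dev, "timeout waiting for RXCOMPLETE, %x",
                                status);
                        ret = -ETIMEDOUT;
                        goto out;
                }
        }
        /* the byte is ready in RX_DATA */
        *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
        mutex_unlock(&hdq_data->hdq_mutex);
        return ret;
}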
404 static int omap_hdq_get(struct hdq_data *hdq_data)
408 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
414 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
415 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
419 hdq_data->hdq_usecount++;
421 if (1 == hdq_data->hdq_usecount) {
422 if (clk_enable(hdq_data->hdq_ick)) {
423 dev_dbg(hdq_data->dev, "Can not enable ick\n");
427 if (clk_enable(hdq_data->hdq_fck)) {
428 dev_dbg(hdq_data->dev, "Can not enable fck\n");
429 clk_disable(hdq_data->hdq_ick);
435 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
437 ret = _omap_hdq_reset(hdq_data);
440 hdq_data->hdq_usecount--;
443 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
446 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
448 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
454 clk_put(hdq_data->hdq_ick);
455 clk_put(hdq_data->hdq_fck);
457 mutex_unlock(&hdq_data->hdq_mutex);
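omap_hdq_get (lines 404 to 448) takes a reference on the controller: the first user enables both clocks and either soft-resets the block (if RESETDONE is not yet set) or reprograms CTRL_STATUS/SYSCONFIG directly. Hedged sketch; try_module_get() and the error labels are inferred from the stock driver:

static int omap_hdq_get(struct hdq_data *hdq_data)
{
        int ret;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
                dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
                ret = -EINVAL;
                goto out;
        }

        hdq_data->hdq_usecount++;
        try_module_get(THIS_MODULE);    /* inferred: not in the match list */
        if (1 == hdq_data->hdq_usecount) {
                if (clk_enable(hdq_data->hdq_ick)) {
                        dev_dbg(hdq_data->dev, "Can not enable ick\n");
                        ret = -ENODEV;
                        goto clk_err;
                }
                if (clk_enable(hdq_data->hdq_fck)) {
                        dev_dbg(hdq_data->dev, "Can not enable fck\n");
                        clk_disable(hdq_data->hdq_ick);
                        ret = -ENODEV;
                        goto clk_err;
                }

                /* make sure the block is out of reset */
                if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
                        OMAP_HDQ_SYSSTATUS_RESETDONE)) {
                        ret = _omap_hdq_reset(hdq_data);
                        if (ret)
                                /* back out the reference on failure */
                                hdq_data->hdq_usecount--;
                } else {
                        /* select HDQ mode, enable clocks and interrupts */
                        hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                                OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                                OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
                        hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                                OMAP_HDQ_SYSCONFIG_AUTOIDLE);
                        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
                }
        }

out:
        mutex_unlock(&hdq_data->hdq_mutex);
        return ret;

clk_err:
        clk_put(hdq_data->hdq_ick);
        clk_put(hdq_data->hdq_fck);
        goto out;
}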
463 static int omap_hdq_put(struct hdq_data *hdq_data)
467 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
471 if (0 == hdq_data->hdq_usecount) {
472 dev_dbg(hdq_data->dev, "attempt to decrement use count"
476 hdq_data->hdq_usecount--;
478 if (0 == hdq_data->hdq_usecount) {
479 clk_disable(hdq_data->hdq_ick);
480 clk_disable(hdq_data->hdq_fck);
483 mutex_unlock(&hdq_data->hdq_mutex);
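omap_hdq_put (lines 463 to 483) is the matching release: the last user turns both clocks back off. Sketch; module_put() is inferred from the stock driver:

static int omap_hdq_put(struct hdq_data *hdq_data)
{
        int ret;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        if (0 == hdq_data->hdq_usecount) {
                dev_dbg(hdq_data->dev,
                        "attempt to decrement use count when it is zero");
                ret = -EINVAL;
        } else {
                hdq_data->hdq_usecount--;
                module_put(THIS_MODULE);        /* inferred: not in the match list */
                if (0 == hdq_data->hdq_usecount) {
                        clk_disable(hdq_data->hdq_ick);
                        clk_disable(hdq_data->hdq_fck);
                }
        }
        mutex_unlock(&hdq_data->hdq_mutex);

        return ret;
}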
491 struct hdq_data *hdq_data = _hdq;
495 ret = hdq_read_byte(hdq_data, &val);
497 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
499 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
502 hdq_data->init_trans = 0;
503 mutex_unlock(&hdq_data->hdq_mutex);
504 omap_hdq_put(hdq_data);
509 if (hdq_data->init_trans) {
510 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
512 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
515 hdq_data->init_trans = 0;
516 mutex_unlock(&hdq_data->hdq_mutex);
517 omap_hdq_put(hdq_data);
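Lines 491 to 517 sit inside omap_w1_read_byte, the w1_bus_master read callback. A read is always the second half of a write-then-read transaction, so on both the error path and the normal path it zeroes init_trans and drops the reference taken by the preceding write. Hedged sketch of the whole callback:

static u8 omap_w1_read_byte(void *_hdq)
{
        struct hdq_data *hdq_data = _hdq;
        u8 val = 0;
        int ret;

        ret = hdq_read_byte(hdq_data, &val);
        if (ret) {
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return -EINTR;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
                omap_hdq_put(hdq_data);
                return -1;      /* truncated to 0xff by the u8 return type */
        }

        /* the write before us started a transaction; close it out */
        if (hdq_data->init_trans) {
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return -EINTR;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
                omap_hdq_put(hdq_data);
        }

        return val;
}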
526 struct hdq_data *hdq_data = _hdq;
531 if (hdq_data->init_trans == 0)
532 omap_hdq_get(hdq_data);
534 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
536 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
539 hdq_data->init_trans++;
540 mutex_unlock(&hdq_data->hdq_mutex);
542 ret = hdq_write_byte(hdq_data, byte, &status);
544 dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
549 if (hdq_data->init_trans > 1) {
550 omap_hdq_put(hdq_data);
551 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
553 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
556 hdq_data->init_trans = 0;
557 mutex_unlock(&hdq_data->hdq_mutex);
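Lines 526 to 557 sit inside omap_w1_write_byte, the write callback. The first write of a transaction takes the controller reference via omap_hdq_get(); once init_trans shows a second byte has gone out (command plus register address), the reference is dropped again. Hedged sketch:

static void omap_w1_write_byte(void *_hdq, u8 byte)
{
        struct hdq_data *hdq_data = _hdq;
        int ret;
        u8 status;

        /* first write of a transaction powers the block up */
        if (hdq_data->init_trans == 0)
                omap_hdq_get(hdq_data);

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                return;
        }
        hdq_data->init_trans++;
        mutex_unlock(&hdq_data->hdq_mutex);

        ret = hdq_write_byte(hdq_data, byte, &status);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
                return;
        }

        /* second write: the transfer is done, release the block */
        if (hdq_data->init_trans > 1) {
                omap_hdq_put(hdq_data);
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
        }
}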
565 struct hdq_data *hdq_data;
570 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
571 if (!hdq_data) {
577 hdq_data->dev = &pdev->dev;
578 platform_set_drvdata(pdev, hdq_data);
587 hdq_data->hdq_base = ioremap(res->start, SZ_4K);
588 if (!hdq_data->hdq_base) {
595 hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
596 hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
598 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
600 if (IS_ERR(hdq_data->hdq_ick)) {
601 ret = PTR_ERR(hdq_data->hdq_ick);
604 if (IS_ERR(hdq_data->hdq_fck)) {
605 ret = PTR_ERR(hdq_data->hdq_fck);
606 clk_put(hdq_data->hdq_ick);
611 hdq_data->hdq_usecount = 0;
612 mutex_init(&hdq_data->hdq_mutex);
614 if (clk_enable(hdq_data->hdq_ick)) {
620 if (clk_enable(hdq_data->hdq_fck)) {
626 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
630 spin_lock_init(&hdq_data->hdq_spinlock);
638 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
644 omap_hdq_break(hdq_data);
647 clk_disable(hdq_data->hdq_ick);
648 clk_disable(hdq_data->hdq_fck);
650 omap_w1_master.data = hdq_data;
662 clk_disable(hdq_data->hdq_fck);
665 clk_disable(hdq_data->hdq_ick);
668 clk_put(hdq_data->hdq_ick);
669 clk_put(hdq_data->hdq_fck);
672 iounmap(hdq_data->hdq_base);
677 kfree(hdq_data);
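Lines 565 to 677 are the probe path: allocate hdq_data, map the registers, grab the "ick"/"fck" clocks, enable them long enough to read the revision and request the IRQ, send an initial break, then park the clocks and register the w1 master. The stock probe unwinds through one error label per step; this condensed sketch keeps the same step order with simplified unwinding, and SZ_4K, the revision print, platform_get_irq() and w1_add_master_device() come from the stock driver:

static int __devinit omap_hdq_probe(struct platform_device *pdev)
{
        struct hdq_data *hdq_data;
        struct resource *res;
        int ret, irq;
        u8 rev;

        hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
        if (!hdq_data)
                return -ENOMEM;

        hdq_data->dev = &pdev->dev;
        platform_set_drvdata(pdev, hdq_data);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                ret = -ENXIO;
                goto err_free;
        }
        hdq_data->hdq_base = ioremap(res->start, SZ_4K);
        if (!hdq_data->hdq_base) {
                ret = -ENOMEM;
                goto err_free;
        }

        hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
        hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
        if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
                ret = -ENODEV;  /* the stock driver propagates PTR_ERR() here */
                goto err_unmap;
        }

        hdq_data->hdq_usecount = 0;
        mutex_init(&hdq_data->hdq_mutex);
        spin_lock_init(&hdq_data->hdq_spinlock);

        if (clk_enable(hdq_data->hdq_ick)) {
                ret = -ENODEV;
                goto err_clk_put;
        }
        if (clk_enable(hdq_data->hdq_fck)) {
                clk_disable(hdq_data->hdq_ick);
                ret = -ENODEV;
                goto err_clk_put;
        }

        rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
        dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c\n",
                 (rev >> 4) + '0', (rev & 0x0f) + '0');

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -ENXIO;
                goto err_clk_disable;
        }
        ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
        if (ret < 0)
                goto err_clk_disable;

        omap_hdq_break(hdq_data);       /* initial break pulse on the bus */

        /* keep the clocks off until a transaction needs them */
        clk_disable(hdq_data->hdq_ick);
        clk_disable(hdq_data->hdq_fck);

        omap_w1_master.data = hdq_data;
        ret = w1_add_master_device(&omap_w1_master);
        if (ret)
                goto err_clk_put;       /* clocks are already off here */

        return 0;

err_clk_disable:
        clk_disable(hdq_data->hdq_fck);
        clk_disable(hdq_data->hdq_ick);
err_clk_put:
        clk_put(hdq_data->hdq_ick);
        clk_put(hdq_data->hdq_fck);
err_unmap:
        iounmap(hdq_data->hdq_base);
err_free:
        platform_set_drvdata(pdev, NULL);
        kfree(hdq_data);
        return ret;
}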
686 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
688 mutex_lock(&hdq_data->hdq_mutex);
690 if (hdq_data->hdq_usecount) {
692 mutex_unlock(&hdq_data->hdq_mutex);
696 mutex_unlock(&hdq_data->hdq_mutex);
699 clk_put(hdq_data->hdq_ick);
700 clk_put(hdq_data->hdq_fck);
701 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
703 iounmap(hdq_data->hdq_base);
704 kfree(hdq_data);
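Lines 686 to 704 are the remove path: refuse to unload while the use count is non-zero, then release the clocks, IRQ, mapping, and the structure itself. Sketch; the -EBUSY return and platform_set_drvdata(pdev, NULL) are inferred from the stock driver (note it frees the hard-coded INT_24XX_HDQ_IRQ rather than the irq number probed earlier):

static int omap_hdq_remove(struct platform_device *pdev)
{
        struct hdq_data *hdq_data = platform_get_drvdata(pdev);

        mutex_lock(&hdq_data->hdq_mutex);

        if (hdq_data->hdq_usecount) {
                dev_dbg(&pdev->dev, "removed when use count is not zero\n");
                mutex_unlock(&hdq_data->hdq_mutex);
                return -EBUSY;
        }

        mutex_unlock(&hdq_data->hdq_mutex);

        /* release everything probe acquired */
        clk_put(hdq_data->hdq_ick);
        clk_put(hdq_data->hdq_fck);
        free_irq(INT_24XX_HDQ_IRQ, hdq_data);
        platform_set_drvdata(pdev, NULL);
        iounmap(hdq_data->hdq_base);
        kfree(hdq_data);

        return 0;
}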