Lines matching refs:ch — references to the struct sh_tmu_channel pointer ch throughout drivers/clocksource/sh_tmu.c, the Linux SuperH TMU clocksource/clockevent driver.

84 static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
89 switch (ch->tmu->model) {
91 return ioread8(ch->tmu->mapbase + 2);
93 return ioread8(ch->tmu->mapbase + 4);
100 return ioread16(ch->base + offs);
102 return ioread32(ch->base + offs);
105 static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
111 switch (ch->tmu->model) {
113 return iowrite8(value, ch->tmu->mapbase + 2);
115 return iowrite8(value, ch->tmu->mapbase + 4);
122 iowrite16(value, ch->base + offs);
124 iowrite32(value, ch->base + offs);
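These two accessors funnel every register access through one spot. TSTR, the start/stop register shared by all channels, is 8 bits wide and sits at a model-dependent offset from the block's mapbase (+2 on SH3-style TMUs, +4 otherwise); per-channel registers are addressed relative to ch->base, with TCR accessed 16-bit and TCOR/TCNT 32-bit. A sketch of the read side, reconstructed from the hits above — the register-number constants and the offset computation (registers spaced 4 bytes apart) are assumptions:

    #define TSTR -1    /* shared start/stop register (assumed encoding) */
    #define TCOR  0    /* constant (reload) register */
    #define TCNT  1    /* down-counter */
    #define TCR   2    /* control register */

    static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
    {
        unsigned long offs;

        if (reg_nr == TSTR) {
            switch (ch->tmu->model) {
            case SH_TMU_SH3:
                return ioread8(ch->tmu->mapbase + 2);
            case SH_TMU:
                return ioread8(ch->tmu->mapbase + 4);
            }
        }

        offs = reg_nr << 2;    /* channel registers are 4 bytes apart */

        if (reg_nr == TCR)
            return ioread16(ch->base + offs);    /* TCR is 16-bit */
        else
            return ioread32(ch->base + offs);    /* TCOR/TCNT are 32-bit */
    }

sh_tmu_write mirrors this exactly with iowrite8/16/32.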
127 static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
132 raw_spin_lock_irqsave(&ch->tmu->lock, flags);
133 value = sh_tmu_read(ch, TSTR);
136 value |= 1 << ch->index;
138 value &= ~(1 << ch->index);
140 sh_tmu_write(ch, TSTR, value);
141 raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
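Because TSTR is shared by every channel in the block, setting or clearing one channel's start bit has to be a locked read-modify-write; ch->tmu->lock is a raw spinlock taken with interrupts disabled since this runs from hard-IRQ context via the clockevent callbacks. The hits show nearly the whole body; only the declarations and the if/else are filled in here:

    static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
    {
        unsigned long flags, value;

        /* The start/stop register is shared by all channels. */
        raw_spin_lock_irqsave(&ch->tmu->lock, flags);
        value = sh_tmu_read(ch, TSTR);

        if (start)
            value |= 1 << ch->index;    /* set this channel's STR bit */
        else
            value &= ~(1 << ch->index); /* clear it */

        sh_tmu_write(ch, TSTR, value);
        raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
    }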
144 static int __sh_tmu_enable(struct sh_tmu_channel *ch)
149 ret = clk_enable(ch->tmu->clk);
151 dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
152 ch->index);
157 sh_tmu_start_stop_ch(ch, 0);
160 sh_tmu_write(ch, TCOR, 0xffffffff);
161 sh_tmu_write(ch, TCNT, 0xffffffff);
164 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
167 sh_tmu_start_stop_ch(ch, 1);
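__sh_tmu_enable is the cold-start path: turn on the functional clock, make sure the channel is stopped, program the maximum period into both TCOR (the auto-reload value) and TCNT (the live counter), select the input-clock/4 prescaler with the underflow interrupt still masked, then start counting. Reconstructed sketch; the error return after clk_enable() is an assumption implied by the dev_err hit:

    static int __sh_tmu_enable(struct sh_tmu_channel *ch)
    {
        int ret;

        ret = clk_enable(ch->tmu->clk);
        if (ret) {
            dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
                ch->index);
            return ret;
        }

        /* make sure the channel is stopped while we program it */
        sh_tmu_start_stop_ch(ch, 0);

        /* maximum timeout: free-running 32-bit down-counter */
        sh_tmu_write(ch, TCOR, 0xffffffff);
        sh_tmu_write(ch, TCNT, 0xffffffff);

        /* input clock / 4, underflow interrupt off */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        sh_tmu_start_stop_ch(ch, 1);
        return 0;
    }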
172 static int sh_tmu_enable(struct sh_tmu_channel *ch)
174 if (ch->enable_count++ > 0)
177 pm_runtime_get_sync(&ch->tmu->pdev->dev);
178 dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
180 return __sh_tmu_enable(ch);
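sh_tmu_enable refcounts the hardware: only the 0-to-1 transition of enable_count actually powers things up. It takes a runtime-PM reference first and marks the device as a syscore device so the timer stays alive across system suspend while in use. The early return on a non-zero count is assumed from the post-increment test:

    static int sh_tmu_enable(struct sh_tmu_channel *ch)
    {
        if (ch->enable_count++ > 0)
            return 0;    /* hardware already enabled */

        pm_runtime_get_sync(&ch->tmu->pdev->dev);
        dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

        return __sh_tmu_enable(ch);
    }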
183 static void __sh_tmu_disable(struct sh_tmu_channel *ch)
186 sh_tmu_start_stop_ch(ch, 0);
189 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
192 clk_disable(ch->tmu->clk);
195 static void sh_tmu_disable(struct sh_tmu_channel *ch)
197 if (WARN_ON(ch->enable_count == 0))
200 if (--ch->enable_count > 0)
203 __sh_tmu_disable(ch);
205 dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
206 pm_runtime_put(&ch->tmu->pdev->dev);
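The disable pair mirrors enable: __sh_tmu_disable stops the channel, masks the underflow interrupt by rewriting TCR without TCR_UNIE, and gates the clock; sh_tmu_disable performs that teardown only on the 1-to-0 refcount transition, then clears the syscore marking and drops the runtime-PM reference. The WARN_ON and early returns below are assumptions consistent with the hits:

    static void sh_tmu_disable(struct sh_tmu_channel *ch)
    {
        if (WARN_ON(ch->enable_count == 0))
            return;    /* unbalanced disable */

        if (--ch->enable_count > 0)
            return;    /* other users remain */

        __sh_tmu_disable(ch);

        dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
        pm_runtime_put(&ch->tmu->pdev->dev);
    }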
209 static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
213 sh_tmu_start_stop_ch(ch, 0);
216 sh_tmu_read(ch, TCR);
219 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
223 sh_tmu_write(ch, TCOR, delta);
225 sh_tmu_write(ch, TCOR, 0xffffffff);
227 sh_tmu_write(ch, TCNT, delta);
230 sh_tmu_start_stop_ch(ch, 1);
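sh_tmu_set_next is the reprogramming sequence shared by one-shot and periodic modes. The channel must be stopped while TCNT changes; the dummy TCR read acknowledges any pending underflow; writing TCR_UNIE | TCR_TPSC_CLK4 unmasks the underflow interrupt; and TCOR decides what the counter reloads to on underflow — delta again for a periodic tick, or the maximum value for one-shot so a late reprogram doesn't fire early. The if (periodic) wrapper around the two TCOR writes is an assumption:

    static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
                    int periodic)
    {
        sh_tmu_start_stop_ch(ch, 0);    /* stop while reprogramming */

        sh_tmu_read(ch, TCR);           /* acknowledge pending underflow */

        sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        if (periodic)
            sh_tmu_write(ch, TCOR, delta);        /* reload delta each period */
        else
            sh_tmu_write(ch, TCOR, 0xffffffff);   /* one-shot: reload max */

        sh_tmu_write(ch, TCNT, delta);

        sh_tmu_start_stop_ch(ch, 1);    /* restart */
    }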
235 struct sh_tmu_channel *ch = dev_id;
238 if (clockevent_state_oneshot(&ch->ced))
239 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
241 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
244 ch->ced.event_handler(&ch->ced);
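The interrupt handler's only hardware action is a TCR write: in one-shot mode it drops TCR_UNIE so the underflow can't fire again, in periodic mode it rewrites the same enabled value, which acknowledges the underflow flag while keeping the interrupt armed. It then calls into the clockevent core. The signature and IRQ_HANDLED return are assumptions from the dev_id usage:

    static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
    {
        struct sh_tmu_channel *ch = dev_id;

        /* one-shot: disable; periodic: acknowledge and keep enabled */
        if (clockevent_state_oneshot(&ch->ced))
            sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
        else
            sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* notify the clockevent layer */
        ch->ced.event_handler(&ch->ced);
        return IRQ_HANDLED;
    }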
255 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
257 return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
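TCNT counts down, but the clocksource core expects a value that counts up, so the read callback inverts it: for a 32-bit counter, x ^ 0xffffffff equals 0xffffffff - x. Sketch; cs_to_sh_tmu is assumed to be the usual container_of pattern, and the u64 return type is an assumption:

    static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
    {
        return container_of(cs, struct sh_tmu_channel, cs);
    }

    static u64 sh_tmu_clocksource_read(struct clocksource *cs)
    {
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        /* present the down-counter as an up-counter */
        return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
    }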
262 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
265 if (WARN_ON(ch->cs_enabled))
268 ret = sh_tmu_enable(ch);
270 ch->cs_enabled = true;
277 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
279 if (WARN_ON(!ch->cs_enabled))
282 sh_tmu_disable(ch);
283 ch->cs_enabled = false;
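The clocksource enable/disable callbacks wrap the refcounted sh_tmu_enable/sh_tmu_disable and keep a cs_enabled flag so the suspend/resume hooks know whether the clocksource is live; the WARN_ONs catch unbalanced calls from the core. Sketch of the enable side (return-value handling assumed):

    static int sh_tmu_clocksource_enable(struct clocksource *cs)
    {
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
        int ret;

        if (WARN_ON(ch->cs_enabled))
            return 0;

        ret = sh_tmu_enable(ch);
        if (!ret)
            ch->cs_enabled = true;

        return ret;
    }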
288 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
290 if (!ch->cs_enabled)
293 if (--ch->enable_count == 0) {
294 __sh_tmu_disable(ch);
295 dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
301 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
303 if (!ch->cs_enabled)
306 if (ch->enable_count++ == 0) {
307 dev_pm_genpd_resume(&ch->tmu->pdev->dev);
308 __sh_tmu_enable(ch);
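Suspend and resume deliberately bypass the refcount wrappers: they drop or retake exactly one enable_count reference and call the bare __sh_tmu_disable/__sh_tmu_enable, bracketed by dev_pm_genpd_suspend/resume so the enclosing generic power domain can be powered off; the runtime-PM reference the live clocksource holds is left untouched. Sketch of the suspend side:

    static void sh_tmu_clocksource_suspend(struct clocksource *cs)
    {
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
            return;

        /* last reference: quiesce hardware, then let genpd power down */
        if (--ch->enable_count == 0) {
            __sh_tmu_disable(ch);
            dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
        }
    }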
312 static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
315 struct clocksource *cs = &ch->cs;
327 dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
328 ch->index);
330 clocksource_register_hz(cs, ch->tmu->rate);
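Registration fills in the struct clocksource and hands the channel's input rate to the core via clocksource_register_hz. Only the dev_info and the register call appear in the hits; the field values below (rating, 32-bit mask, CLOCK_SOURCE_IS_CONTINUOUS) are assumptions typical of a free-running 32-bit counter:

    static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
                           const char *name)
    {
        struct clocksource *cs = &ch->cs;

        cs->name = name;
        cs->rating = 200;                          /* assumed */
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
        cs->suspend = sh_tmu_clocksource_suspend;
        cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);           /* assumed: 32-bit counter */
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;    /* assumed */

        dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
             ch->index);

        clocksource_register_hz(cs, ch->tmu->rate);
        return 0;
    }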
339 static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
341 sh_tmu_enable(ch);
344 ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
345 sh_tmu_set_next(ch, ch->periodic, 1);
351 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
354 sh_tmu_disable(ch);
361 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
365 sh_tmu_disable(ch);
367 dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
368 ch->index, periodic ? "periodic" : "oneshot");
369 sh_tmu_clock_event_start(ch, periodic);
386 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
391 sh_tmu_set_next(ch, delta, 0);
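The clockevent side is thin glue. Starting periodic mode converts the channel rate into ticks per jiffy with round-to-nearest, (rate + HZ/2) / HZ — e.g. an 8.192 MHz input at HZ=100 gives 81920 ticks — and programs it with reload; the state-change callbacks tear down the previous mode with sh_tmu_disable before starting the new one; set_next_event just forwards delta to sh_tmu_set_next in one-shot mode. Sketch of the start helper plus a shared state-setter (the wrapper name and the clockevent_state_* guard are assumptions):

    static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
    {
        sh_tmu_enable(ch);

        if (periodic) {
            /* ticks per jiffy, rounded to nearest */
            ch->periodic = (ch->tmu->rate + HZ / 2) / HZ;
            sh_tmu_set_next(ch, ch->periodic, 1);
        }
    }

    static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
                        int periodic)
    {
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        /* deal with the old mode first */
        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
            sh_tmu_disable(ch);

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
             ch->index, periodic ? "periodic" : "oneshot");
        sh_tmu_clock_event_start(ch, periodic);
        return 0;
    }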
405 static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
408 struct clock_event_device *ced = &ch->ced;
423 dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
424 ch->index);
426 clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);
428 ret = request_irq(ch->irq, sh_tmu_interrupt,
430 dev_name(&ch->tmu->pdev->dev), ch);
432 dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
433 ch->index, ch->irq);
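clockevents_config_and_register(ced, rate, 0x300, 0xffffffff) tells the core the usable delta range: no less than 0x300 ticks, leaving room for the stop/program/start sequence in sh_tmu_set_next to complete, and no more than the full 32-bit counter. The IRQ is requested after registration with ch as dev_id so the handler can recover its channel. Sketch of the tail; the request_irq flags are an assumption (the hit elides them), shown as values commonly used by kernel tick drivers:

    clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

    ret = request_irq(ch->irq, sh_tmu_interrupt,
              IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, /* assumed */
              dev_name(&ch->tmu->pdev->dev), ch);
    if (ret) {
        dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
            ch->index, ch->irq);
        return;
    }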
438 static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
442 ch->tmu->has_clockevent = true;
443 sh_tmu_register_clockevent(ch, name);
445 ch->tmu->has_clocksource = true;
446 sh_tmu_register_clocksource(ch, name);
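sh_tmu_register gives each channel exactly one role and records it on the parent device so the probe code knows what got registered. The if/else-if structure and the bool parameters are assumptions consistent with the two branches visible in the hits:

    static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
                   bool clockevent, bool clocksource)
    {
        if (clockevent) {
            ch->tmu->has_clockevent = true;
            sh_tmu_register_clockevent(ch, name);
        } else if (clocksource) {
            ch->tmu->has_clocksource = true;
            sh_tmu_register_clocksource(ch, name);
        }

        return 0;
    }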
452 static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
460 ch->tmu = tmu;
461 ch->index = index;
464 ch->base = tmu->mapbase + 4 + ch->index * 12;
466 ch->base = tmu->mapbase + 8 + ch->index * 12;
468 ch->irq = platform_get_irq(tmu->pdev, index);
469 if (ch->irq < 0)
470 return ch->irq;
472 ch->cs_enabled = false;
473 ch->enable_count = 0;
475 return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
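Channel setup decodes the register layout: each channel owns three 4-byte registers (TCOR, TCNT, TCR), hence the 12-byte stride, and channel 0 starts just past the shared area — mapbase + 4 on SH3-style blocks (TSTR at +2) and mapbase + 8 otherwise (TSTR at +4). Under that reading, channel 2's TCNT on a non-SH3 block sits at mapbase + 8 + 2*12 + 4 = mapbase + 36. Sketch; the model test guarding the two base computations and the skip of unused channels are assumptions:

    static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
                    bool clockevent, bool clocksource,
                    struct sh_tmu_device *tmu)
    {
        if (!clockevent && !clocksource)
            return 0;    /* assumed: unused channels are skipped */

        ch->tmu = tmu;
        ch->index = index;

        /* three 4-byte registers per channel => 12-byte stride */
        if (tmu->model == SH_TMU_SH3)    /* assumed condition */
            ch->base = tmu->mapbase + 4 + ch->index * 12;
        else
            ch->base = tmu->mapbase + 8 + ch->index * 12;

        ch->irq = platform_get_irq(tmu->pdev, index);
        if (ch->irq < 0)
            return ch->irq;

        ch->cs_enabled = false;
        ch->enable_count = 0;

        return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
                       clockevent, clocksource);
    }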