Lines matching defs:cpts (TI CPTS driver, drivers/net/ethernet/ti/cpts.c)

21 #include "cpts.h"
50 static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
52 u32 r = cpts_read32(cpts, intstat_raw);
55 *high = cpts_read32(cpts, event_high);
56 *low = cpts_read32(cpts, event_low);
57 cpts_write32(cpts, EVENT_POP, event_pop);
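
The matches at 50-57 span the whole of cpts_fifo_pop(). Reconstructing the elided lines (the TS_PEND_RAW test and the return paths are my glue, not matched lines), the pop sequence is: check the raw interrupt status for a pending event, read the two event words, then pop the FIFO entry by writing EVENT_POP:

static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);    /* event word 1 */
		*low  = cpts_read32(cpts, event_low);     /* event word 2 */
		cpts_write32(cpts, EVENT_POP, event_pop); /* ack/advance the FIFO */
		return 0;
	}
	return -1; /* FIFO empty */
}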
63 static int cpts_purge_events(struct cpts *cpts)
69 list_for_each_safe(this, next, &cpts->events) {
73 list_add(&event->list, &cpts->pool);
79 dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
83 static void cpts_purge_txq(struct cpts *cpts)
89 skb_queue_walk_safe(&cpts->txq, skb, tmp) {
92 __skb_unlink(skb, &cpts->txq);
99 dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
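
cpts_purge_txq() drops deferred-TX skbs whose match window has expired; a sketch of how the matched lines at 83-99 likely fit together (the tmo field name and the dev_consume_skb_any() call are assumptions from the surrounding driver, and the caller is expected to hold cpts->txq.lock, hence the unlocked __skb_unlink()):

static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq); /* queue lock already held */
			dev_consume_skb_any(skb);      /* drop our reference */
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}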
105 static int cpts_fifo_read(struct cpts *cpts, int match)
114 spin_lock_irqsave(&cpts->lock, flags);
117 if (cpts_fifo_pop(cpts, &hi, &lo))
120 if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
121 dev_warn(cpts->dev, "cpts: event pool empty\n");
125 event = list_first_entry(&cpts->pool, struct cpts_event, list);
128 event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
131 dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
135 WRITE_ONCE(cpts->cur_timestamp, lo);
136 timecounter_read(&cpts->tc);
137 if (cpts->mult_new) {
138 cpts->cc.mult = cpts->mult_new;
139 cpts->mult_new = 0;
141 if (!cpts->irq_poll)
142 complete(&cpts->ts_push_complete);
150 list_add_tail(&event->list, &cpts->events);
160 ptp_clock_event(cpts->clock, &pevent);
163 dev_err(cpts->dev, "cpts: unknown event type\n");
170 spin_unlock_irqrestore(&cpts->lock, flags);
172 if (!cpts->irq_poll && need_schedule)
173 ptp_schedule_worker(cpts->clock, 0);
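
cpts_fifo_read() dispatches on the type encoded in each event's high word: CPTS_EV_PUSH feeds cur_timestamp (line 135), CPTS_EV_TX/CPTS_EV_RX are queued for later matching (line 150), and CPTS_EV_HW raises a PTP_CLOCK_EXTTS event (line 160). A sketch of the decode, assuming the cpts.h constants EVENT_TYPE_SHIFT = 20 and EVENT_TYPE_MASK = 0xf:

/* event type lives in bits 23:20 of the event high word (assumed layout) */
static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}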
178 void cpts_misc_interrupt(struct cpts *cpts)
180 cpts_fifo_read(cpts, -1);
186 struct cpts *cpts = container_of(cc, struct cpts, cc);
188 return READ_ONCE(cpts->cur_timestamp);
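
Lines 186-188 are effectively the whole cyclecounter read callback: the hardware counter is never read synchronously here. A CPTS_EV_PUSH event caches the value (line 135) and this callback replays it to the timecounter core:

static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	/* replay the counter value captured by the last CPTS_EV_PUSH event */
	return READ_ONCE(cpts->cur_timestamp);
}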
191 static void cpts_update_cur_time(struct cpts *cpts, int match,
196 reinit_completion(&cpts->ts_push_complete);
199 spin_lock_irqsave(&cpts->lock, flags);
201 cpts_write32(cpts, TS_PUSH, ts_push);
202 cpts_read32(cpts, ts_push);
204 spin_unlock_irqrestore(&cpts->lock, flags);
206 if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
207 dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
209 if (!cpts->irq_poll &&
210 !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
211 dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
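
Putting the matches at 191-211 together, cpts_update_cur_time() latches the counter with a TS_PUSH write, flushes it with a readback, then either drains the FIFO itself (polling mode) or waits for the interrupt handler to complete ts_push_complete. The ptp_read_system_prets()/ptp_read_system_postts() bracketing is my assumption about how the sts argument is consumed:

static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push); /* latch counter into event FIFO */
	cpts_read32(cpts, ts_push);           /* readback flushes the posted write */
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}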
218 struct cpts *cpts = container_of(ptp, struct cpts, info);
220 mutex_lock(&cpts->ptp_clk_mutex);
222 cpts->mult_new = adjust_by_scaled_ppm(cpts->cc_mult, scaled_ppm);
224 cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
226 mutex_unlock(&cpts->ptp_clk_mutex);
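
On line 222, scaled_ppm carries 16 fractional bits, so the new multiplier is cc_mult scaled by (1 + scaled_ppm / (10^6 * 2^16)); the hardware rate is untouched and only the cyclecounter's mult changes (applied at the next FIFO event, lines 137-139). A minimal user-space model of that arithmetic, not the kernel's adjust_by_scaled_ppm() implementation:

#include <stdint.h>
#include <stdio.h>

/* model: new_mult = base * (1 + scaled_ppm / (1e6 * 2^16)) */
static uint32_t model_adjust(uint32_t base, long scaled_ppm)
{
	int64_t adj = (int64_t)base * scaled_ppm / (1000000LL << 16);

	return base + (int32_t)adj;
}

int main(void)
{
	/* speed the clock up by 10 ppm, expressed with 16 fractional bits */
	printf("new mult: %u\n", model_adjust(0x80000000u, 10L << 16));
	return 0;
}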
232 struct cpts *cpts = container_of(ptp, struct cpts, info);
234 mutex_lock(&cpts->ptp_clk_mutex);
235 timecounter_adjtime(&cpts->tc, delta);
236 mutex_unlock(&cpts->ptp_clk_mutex);
245 struct cpts *cpts = container_of(ptp, struct cpts, info);
248 mutex_lock(&cpts->ptp_clk_mutex);
250 cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);
252 ns = timecounter_read(&cpts->tc);
253 mutex_unlock(&cpts->ptp_clk_mutex);
263 struct cpts *cpts = container_of(ptp, struct cpts, info);
268 mutex_lock(&cpts->ptp_clk_mutex);
269 timecounter_init(&cpts->tc, &cpts->cc, ns);
270 mutex_unlock(&cpts->ptp_clk_mutex);
275 static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
279 if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
282 mutex_lock(&cpts->ptp_clk_mutex);
284 v = cpts_read32(cpts, control);
287 cpts->hw_ts_enable |= BIT(index);
290 cpts->hw_ts_enable &= ~BIT(index);
292 cpts_write32(cpts, v, control);
294 mutex_unlock(&cpts->ptp_clk_mutex);
302 struct cpts *cpts = container_of(ptp, struct cpts, info);
306 return cpts_extts_enable(cpts, rq->extts.index, on);
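
cpts_extts_enable() mirrors the requested state in hw_ts_enable and flips the matching HWx_TS_PUSH_EN bit in the control register; a sketch of the elided lines between 275 and 294, assuming the push-enable bits start at control bit 8:

static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0; /* already in the requested state */

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index); /* HWx_TS_PUSH_EN (bit position assumed) */
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}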
314 static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
329 spin_lock_irqsave(&cpts->txq.lock, flags);
330 skb_queue_splice_init(&cpts->txq, &txq_list);
331 spin_unlock_irqrestore(&cpts->txq.lock, flags);
345 dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
352 dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
358 spin_lock_irqsave(&cpts->txq.lock, flags);
359 skb_queue_splice(&txq_list, &cpts->txq);
360 spin_unlock_irqrestore(&cpts->txq.lock, flags);
365 static void cpts_process_events(struct cpts *cpts)
373 spin_lock_irqsave(&cpts->lock, flags);
374 list_splice_init(&cpts->events, &events);
375 spin_unlock_irqrestore(&cpts->lock, flags);
379 if (cpts_match_tx_ts(cpts, event) ||
386 spin_lock_irqsave(&cpts->lock, flags);
387 list_splice_tail(&events, &cpts->events);
388 list_splice_tail(&events_free, &cpts->pool);
389 spin_unlock_irqrestore(&cpts->lock, flags);
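
The same splice-out/process/splice-back shape appears three times in this file (txq at 329-360, the event list here, txq again at 406-412): take the spinlock only long enough to detach the list, work on it unlocked, then merge the leftovers back. Reconstructed around the matches at 365-389 (the tmo check and the list_entry() glue are assumptions):

static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events); /* detach under the lock */
	spin_unlock_irqrestore(&cpts->lock, flags);

	/* matched or expired events go back to the free pool */
	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);    /* unmatched: keep waiting */
	list_splice_tail(&events_free, &cpts->pool); /* recycle */
	spin_unlock_irqrestore(&cpts->lock, flags);
}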
394 struct cpts *cpts = container_of(ptp, struct cpts, info);
395 unsigned long delay = cpts->ov_check_period;
399 mutex_lock(&cpts->ptp_clk_mutex);
401 cpts_update_cur_time(cpts, -1, NULL);
402 ns = timecounter_read(&cpts->tc);
404 cpts_process_events(cpts);
406 spin_lock_irqsave(&cpts->txq.lock, flags);
407 if (!skb_queue_empty(&cpts->txq)) {
408 cpts_purge_txq(cpts);
409 if (!skb_queue_empty(&cpts->txq))
412 spin_unlock_irqrestore(&cpts->txq.lock, flags);
414 dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
415 mutex_unlock(&cpts->ptp_clk_mutex);
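
The worker exists because the counter is 32 bits wide: timecounter_read() must run at least once per wrap or the accumulated time goes wrong, which is why line 623 below sets ov_check_period to half the wrap time. Back-of-envelope, as a standalone model with an assumed refclk rate:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 250000000ULL; /* example refclk rate in Hz (assumed) */
	double wrap = ((double)UINT32_MAX + 1) / freq;

	printf("32-bit counter wraps every %.1f s\n", wrap);
	printf("checking every %.1f s (half the wrap) keeps the timecounter in sync\n",
	       wrap / 2);
	return 0;
}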
457 static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
466 cpts_fifo_read(cpts, -1);
467 spin_lock_irqsave(&cpts->lock, flags);
468 list_for_each_safe(this, next, &cpts->events) {
472 list_add(&event->list, &cpts->pool);
484 list_add(&event->list, &cpts->pool);
488 spin_unlock_irqrestore(&cpts->lock, flags);
493 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
511 dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
514 ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
523 void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
537 dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
544 skb_queue_tail(&cpts->txq, skb);
545 ptp_schedule_worker(cpts->clock, 0);
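
TX timestamps are delivered asynchronously: the skb is parked on txq with an extra reference and the PTP worker completes it once a matching CPTS_EV_TX event arrives (or the expiry at line 352 fires). A sketch of the full path around the matches at 523-545; the helper name cpts_skb_get_mtype_seqid() and the CPTS_SKB_RX_TX_TMO timeout are assumptions from this driver version:

void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return; /* nobody asked for a TX timestamp */

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return; /* not a PTP event packet */

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	/* hold a reference; the worker releases it on match or expiry */
	skb_get(skb);
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}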
549 int cpts_register(struct cpts *cpts)
553 skb_queue_head_init(&cpts->txq);
554 INIT_LIST_HEAD(&cpts->events);
555 INIT_LIST_HEAD(&cpts->pool);
557 list_add(&cpts->pool_data[i].list, &cpts->pool);
559 err = clk_enable(cpts->refclk);
563 cpts_write32(cpts, CPTS_EN, control);
564 cpts_write32(cpts, TS_PEND_EN, int_enable);
566 timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());
568 cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
569 if (IS_ERR(cpts->clock)) {
570 err = PTR_ERR(cpts->clock);
571 cpts->clock = NULL;
574 cpts->phc_index = ptp_clock_index(cpts->clock);
576 ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
580 clk_disable(cpts->refclk);
585 void cpts_unregister(struct cpts *cpts)
587 if (WARN_ON(!cpts->clock))
590 ptp_clock_unregister(cpts->clock);
591 cpts->clock = NULL;
592 cpts->phc_index = -1;
594 cpts_write32(cpts, 0, int_enable);
595 cpts_write32(cpts, 0, control);
598 skb_queue_purge(&cpts->txq);
600 clk_disable(cpts->refclk);
604 static void cpts_calc_mult_shift(struct cpts *cpts)
609 freq = clk_get_rate(cpts->refclk);
614 maxsec = cpts->cc.mask;
623 cpts->ov_check_period = (HZ * maxsec) / 2;
624 dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
625 cpts->ov_check_period);
627 if (cpts->cc.mult || cpts->cc.shift)
630 clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
634 ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);
636 dev_info(cpts->dev,
638 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
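
The conversion being set up here is ns = (cycles * mult) >> shift, and lines 634-638 report how far one second's worth of cycles lands from NSEC_PER_SEC. A standalone model of that check, using an assumed 200 MHz refclk and a hand-picked shift instead of clocks_calc_mult_shift():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t freq = 200000000ULL; /* example refclk rate in Hz (assumed) */
	uint32_t shift = 28;          /* hand-picked; the kernel computes this */
	uint32_t mult = (uint32_t)((NSEC_PER_SEC << shift) / freq);
	/* freq * mult fits in 64 bits for these values */
	uint64_t ns = (freq * (uint64_t)mult) >> shift;

	printf("mult=%u shift=%u err=%lld ns/sec\n",
	       mult, shift, (long long)(ns - NSEC_PER_SEC));
	return 0;
}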
651 static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
660 refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
667 dev_err(cpts->dev, "mux-clock %s must have parents\n",
672 parent_names = devm_kcalloc(cpts->dev, num_parents,
675 mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
690 clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
693 &cpts->reg->rftclk_sel, 0, 0x1F,
700 ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
702 dev_err(cpts->dev, "add clkmux unreg action %d", ret);
710 ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
713 dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
724 static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
730 cpts->cc.mult = prop;
733 cpts->cc.shift = prop;
735 if ((cpts->cc.mult && !cpts->cc.shift) ||
736 (!cpts->cc.mult && cpts->cc.shift))
739 return cpts_of_mux_clk_setup(cpts, node);
742 dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
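
Lines 724-742: the fixed mult/shift pair is optional in DT but must come as a pair, otherwise the driver takes the error path. A sketch of the body with the property names from the TI CPSW binding (cpts_clock_mult / cpts_clock_shift; verify against the binding document):

	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	/* mult and shift only make sense together */
	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return -EINVAL;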
746 struct cpts *cpts_create(struct device *dev, void __iomem *regs,
749 struct cpts *cpts;
752 cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
753 if (!cpts)
756 cpts->dev = dev;
757 cpts->reg = (struct cpsw_cpts __iomem *)regs;
758 cpts->irq_poll = true;
759 spin_lock_init(&cpts->lock);
760 mutex_init(&cpts->ptp_clk_mutex);
761 init_completion(&cpts->ts_push_complete);
763 ret = cpts_of_parse(cpts, node);
767 cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
768 if (IS_ERR(cpts->refclk))
770 cpts->refclk = devm_clk_get(dev, "cpts");
772 if (IS_ERR(cpts->refclk)) {
773 dev_err(dev, "Failed to get cpts refclk %ld\n",
774 PTR_ERR(cpts->refclk));
775 return ERR_CAST(cpts->refclk);
778 ret = clk_prepare(cpts->refclk);
782 cpts->cc.read = cpts_systim_read;
783 cpts->cc.mask = CLOCKSOURCE_MASK(32);
784 cpts->info = cpts_info;
785 cpts->phc_index = -1;
788 cpts->info.n_ext_ts = n_ext_ts;
790 cpts_calc_mult_shift(cpts);
794 cpts->cc_mult = cpts->cc.mult;
796 return cpts;
800 void cpts_release(struct cpts *cpts)
802 if (!cpts)
805 if (WARN_ON(!cpts->refclk))
808 clk_unprepare(cpts->refclk);