// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Linaro Ltd. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"

/* PD PHY register offsets and bit fields */
#define USB_PDPHY_MSG_CONFIG_REG	0x40
#define MSG_CONFIG_PORT_DATA_ROLE	BIT(3)
#define MSG_CONFIG_PORT_POWER_ROLE	BIT(2)
#define MSG_CONFIG_SPEC_REV_MASK	(BIT(1) | BIT(0))

#define USB_PDPHY_EN_CONTROL_REG	0x46
#define CONTROL_ENABLE			BIT(0)

#define USB_PDPHY_RX_STATUS_REG		0x4A
#define RX_FRAME_TYPE			(BIT(0) | BIT(1) | BIT(2))

#define USB_PDPHY_FRAME_FILTER_REG	0x4C
#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
#define FRAME_FILTER_EN_SOP		BIT(0)

#define USB_PDPHY_TX_SIZE_REG		0x42
#define TX_SIZE_MASK			0xF

#define USB_PDPHY_TX_CONTROL_REG	0x44
#define TX_CONTROL_RETRY_COUNT(n)	(((n) & 0x3) << 5)
#define TX_CONTROL_FRAME_TYPE(n)	(((n) & 0x7) << 2)
#define TX_CONTROL_FRAME_TYPE_CABLE_RESET	(0x1 << 2)
#define TX_CONTROL_SEND_SIGNAL		BIT(1)
#define TX_CONTROL_SEND_MSG		BIT(0)

#define USB_PDPHY_RX_SIZE_REG		0x48

#define USB_PDPHY_RX_ACKNOWLEDGE_REG	0x4B
#define RX_BUFFER_TOKEN			BIT(0)

#define USB_PDPHY_BIST_MODE_REG		0x4E
#define BIST_MODE_MASK			0xF
#define BIST_ENABLE			BIT(7)
#define PD_MSG_BIST			0x3
#define PD_BIST_TEST_DATA_MODE		0x8

#define USB_PDPHY_TX_BUFFER_HDR_REG	0x60
#define USB_PDPHY_TX_BUFFER_DATA_REG	0x62

#define USB_PDPHY_RX_BUFFER_REG		0x80

/* VDD regulator */
#define VDD_PDPHY_VOL_MIN		2800000	/* uV */
#define VDD_PDPHY_VOL_MAX		3300000	/* uV */
#define VDD_PDPHY_HPM_LOAD		3000	/* uA */

/* Message Spec Rev field */
#define PD_MSG_HDR_REV(hdr)		(((hdr) >> 6) & 3)

/* timers */
#define RECEIVER_RESPONSE_TIME		15	/* tReceiverResponse */
#define HARD_RESET_COMPLETE_TIME	5	/* tHardResetComplete */

/* Interrupt numbers */
#define PMIC_PDPHY_SIG_TX_IRQ		0x0
#define PMIC_PDPHY_SIG_RX_IRQ		0x1
#define PMIC_PDPHY_MSG_TX_IRQ		0x2
#define PMIC_PDPHY_MSG_RX_IRQ		0x3
#define PMIC_PDPHY_MSG_TX_FAIL_IRQ	0x4
#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ	0x5
#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ	0x6
#define PMIC_PDPHY_FR_SWAP_IRQ		0x7

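/*
 * Per-interrupt context: the Linux IRQ number plus the driver-internal
 * virq identifier (PMIC_PDPHY_*_IRQ) used to dispatch in the ISR.
 */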
struct pmic_typec_pdphy_irq_data {
	int				virq;
	int				irq;
	struct pmic_typec_pdphy		*pmic_typec_pdphy;
};

struct pmic_typec_pdphy {
	struct device			*dev;
	struct tcpm_port		*tcpm_port;
	struct regmap			*regmap;
	u32				base;

	unsigned int			nr_irqs;
	struct pmic_typec_pdphy_irq_data	*irq_data;

	struct work_struct		reset_work;
	struct work_struct		receive_work;
	struct regulator		*vdd_pdphy;
	spinlock_t			lock;		/* Register atomicity */
};

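/* Park the PHY: terminate any transmission and disable all frame filters */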
static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	/* Terminate TX */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto err;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
	if (ret)
		goto err;

	return;
err:
	dev_err(dev, "pd_reset_on error\n");
}

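/* Re-enable reception of SOP and Hard Reset frames */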
static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
			   FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
	if (ret)
		dev_err(dev, "pd_reset_off error\n");
}

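/*
 * Worker scheduled from the SIG_RX interrupt: cycle the frame filter and
 * report the received Hard Reset to TCPM.
 */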
static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
						     reset_work);
	unsigned long flags;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
}

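/*
 * Clear TX_CONTROL and read it back; the readback provides the delay the
 * hardware needs to latch the cleared value.
 */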
static int
qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	int ret;

	/* Clear TX control register */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto done;

	/* Perform readback to ensure sufficient delay for command to latch */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);

done:
	if (ret)
		dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");

	return ret;
}

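/*
 * Transmit signaling (Hard Reset/Cable Reset, no payload). The retry count
 * follows the negotiated revision: nRetryCount is 2 for PD 3.0, 3 for PD 2.0.
 */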
static int
qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
					 enum tcpm_transmit_type type,
					 unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	val = TX_CONTROL_SEND_SIGNAL;
	if (negotiated_rev == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
		val |= TX_CONTROL_FRAME_TYPE(1);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_vdbg(dev, "pd_transmit_signal: type %d negotiated_rev %d send %d\n",
		 type, negotiated_rev, ret);

	return ret;
}

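/*
 * Transmit a PD message: stage the 2-byte header and the data objects in
 * the TX buffer, program the total size, then trigger the send with the
 * revision-dependent retry count. Returns -EBUSY if an unacknowledged RX
 * message still owns the buffer.
 */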
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
					  enum tcpm_transmit_type type,
					  const struct pd_message *msg,
					  unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val, hdr_len, txbuf_len, txsize_len;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			  &val);
	if (ret)
		goto done;

	if (val) {
		dev_err(dev, "pd_transmit_payload: RX message pending\n");
		ret = -EBUSY;
		goto done;
	}

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;

	/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
	ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
				pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
				&msg->header, hdr_len);
	if (ret)
		goto done;

	/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
	if (txbuf_len) {
		ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
					pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
					&msg->payload, txbuf_len);
		if (ret)
			goto done;
	}

	/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
			   txsize_len);
	if (ret)
		goto done;

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Initiate transmit with retry count as indicated by PD revision */
	val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
	if (pd_header_rev(msg->header) == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (ret) {
		dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
			hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
	}

	return ret;
}

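/*
 * tcpc_dev .pd_transmit callback: messages go through the payload path,
 * signaling (msg == NULL, e.g. Hard Reset) through the signal path.
 */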
static int qcom_pmic_typec_pdphy_pd_transmit(struct tcpc_dev *tcpc,
					     enum tcpm_transmit_type type,
					     const struct pd_message *msg,
					     unsigned int negotiated_rev)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	if (msg) {
		ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
								type, msg,
								negotiated_rev);
	} else {
		ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
							       type,
							       negotiated_rev);
	}

	if (ret)
		dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);

	return ret;
}

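/*
 * Read a received message out of the RX buffer, return buffer ownership
 * to the hardware and hand the message to TCPM.
 */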
static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	struct pd_message msg;
	unsigned int size, rx_status;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
	if (ret)
		goto done;

	/* Hardware reports one less than the real number of received bytes */
	if (size < 1 || size > sizeof(msg.payload) + 1) {
		dev_dbg(dev, "pd_receive: invalid size %d\n", size);
		goto done;
	}

	size += 1;
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
			  &rx_status);
	if (ret)
		goto done;

	ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
			       pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
			       (u8 *)&msg, size);
	if (ret)
		goto done;

	/* Return ownership of RX buffer to hardware */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (!ret) {
		dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
		tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg, TCPC_TX_SOP);
	}
}

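/* Threaded interrupt handler: dispatch on the driver-internal virq number */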
static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
{
	struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
	struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;

	switch (irq_data->virq) {
	case PMIC_PDPHY_SIG_TX_IRQ:
		dev_err(dev, "isr: tx_sig\n");
		break;
	case PMIC_PDPHY_SIG_RX_IRQ:
		schedule_work(&pmic_typec_pdphy->reset_work);
		break;
	case PMIC_PDPHY_MSG_TX_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_SUCCESS);
		break;
	case PMIC_PDPHY_MSG_RX_IRQ:
		qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
		break;
	case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_FAILED);
		break;
	case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_DISCARDED);
		break;
	}

	return IRQ_HANDLED;
}

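/*
 * tcpc_dev .set_pd_rx callback: clearing the RX buffer token gives the
 * buffer to the hardware so messages can be received; setting it holds
 * reception off.
 */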
static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");

	return ret;
}

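/*
 * tcpc_dev .set_roles callback: mirror the current data and power role
 * into the MSG_CONFIG port role bits.
 */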
static int qcom_pmic_typec_pdphy_set_roles(struct tcpc_dev *tcpc, bool attached,
					   enum typec_role power_role,
					   enum typec_data_role data_role)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_PORT_DATA_ROLE |
				 MSG_CONFIG_PORT_POWER_ROLE,
				 (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
				 (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0));

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
		data_role, power_role);

	return ret;
}

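/*
 * Bring the PHY up in its default PD 2.0 configuration and re-enable the
 * frame filter; on failure the vdd-pdphy supply is released.
 */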
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
			   CONTROL_ENABLE);
	if (ret)
		goto done;

	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
	if (ret) {
		regulator_disable(pmic_typec_pdphy->vdd_pdphy);
		dev_err(dev, "pdphy_enable fail %d\n", ret);
	}

	return ret;
}

static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);

	return ret;
}

static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
	if (ret)
		goto done;

	usleep_range(400, 500);
	ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
done:
	return ret;
}

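/*
 * Power on the vdd-pdphy supply, reset and enable the PHY, then unmask the
 * PD PHY interrupts (requested with IRQF_NO_AUTOEN in probe).
 */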
static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
				       struct tcpm_port *tcpm_port)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	int i;
	int ret;

	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
	if (ret)
		return ret;

	pmic_typec_pdphy->tcpm_port = tcpm_port;

	ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
	if (ret)
		return ret;

	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
		enable_irq(pmic_typec_pdphy->irq_data[i].irq);

	return 0;
}

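/* Mask the PD PHY interrupts, park the PHY and release the supply */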
static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	int i;

	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
		disable_irq(pmic_typec_pdphy->irq_data[i].irq);

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);

	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}

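/*
 * Allocate the PD PHY state, request the per-IRQ handlers (left disabled
 * until qcom_pmic_typec_pdphy_start()) and register the tcpc_dev PD
 * callbacks with the common Type-C driver.
 */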
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
				struct pmic_typec *tcpm,
				const struct pmic_typec_pdphy_resources *res,
				struct regmap *regmap,
				u32 base)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy;
	struct device *dev = &pdev->dev;
	struct pmic_typec_pdphy_irq_data *irq_data;
	int i, ret, irq;

	pmic_typec_pdphy = devm_kzalloc(dev, sizeof(*pmic_typec_pdphy), GFP_KERNEL);
	if (!pmic_typec_pdphy)
		return -ENOMEM;

	if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
		return -EINVAL;

	irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
				GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
	if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
		return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);

	pmic_typec_pdphy->dev = dev;
	pmic_typec_pdphy->base = base;
	pmic_typec_pdphy->regmap = regmap;
	pmic_typec_pdphy->nr_irqs = res->nr_irqs;
	pmic_typec_pdphy->irq_data = irq_data;
	spin_lock_init(&pmic_typec_pdphy->lock);
	INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);

	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
		irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
		if (irq < 0)
			return irq;

		irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
		irq_data->irq = irq;
		irq_data->virq = res->irq_params[i].virq;

		ret = devm_request_threaded_irq(dev, irq, NULL,
						qcom_pmic_typec_pdphy_isr,
						IRQF_ONESHOT | IRQF_NO_AUTOEN,
						res->irq_params[i].irq_name,
						irq_data);
		if (ret)
			return ret;
	}

	tcpm->pmic_typec_pdphy = pmic_typec_pdphy;

	tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_set_pd_rx;
	tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_set_roles;
	tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_pd_transmit;

	tcpm->pdphy_start = qcom_pmic_typec_pdphy_start;
	tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stop;

	return 0;
}

const struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
	.irq_params = {
		{
			.virq = PMIC_PDPHY_SIG_TX_IRQ,
			.irq_name = "sig-tx",
		},
		{
			.virq = PMIC_PDPHY_SIG_RX_IRQ,
			.irq_name = "sig-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_IRQ,
			.irq_name = "msg-tx",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_IRQ,
			.irq_name = "msg-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
			.irq_name = "msg-tx-failed",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
			.irq_name = "msg-tx-discarded",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
			.irq_name = "msg-rx-discarded",
		},
	},
	.nr_irqs = 7,
};