/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#ifndef __CPTVF_H
#define __CPTVF_H

#include <linux/list.h>
#include "cpt_common.h"

/* Default command queue length */
#define CPT_CMD_QLEN 2046
#define CPT_CMD_QCHUNK_SIZE 1023

/* Default command timeout in seconds */
#define CPT_COMMAND_TIMEOUT 4
#define CPT_TIMER_THOLD	0xFFFF
#define CPT_NUM_QS_PER_VF 1
#define CPT_INST_SIZE 64
#define CPT_NEXT_CHUNK_PTR_SIZE 8

#define CPT_VF_MSIX_VECTORS 2
#define CPT_VF_INTR_MBOX_MASK BIT(0)
#define CPT_VF_INTR_DOVF_MASK BIT(1)
#define CPT_VF_INTR_IRDE_MASK BIT(2)
#define CPT_VF_INTR_NWRP_MASK BIT(3)
#define CPT_VF_INTR_SERR_MASK BIT(4)
#define DMA_DIRECT_DIRECT 0 /* Input DIRECT, Output DIRECT */
#define DMA_GATHER_SCATTER 1
#define FROM_DPTR 1

/**
 * Enumeration cpt_vf_int_vec_e
 *
 * CPT VF MSI-X Vector Enumeration
 * Enumerates the MSI-X interrupt vectors.
 */
enum cpt_vf_int_vec_e {
	CPT_VF_INT_VEC_E_MISC = 0x00,
	CPT_VF_INT_VEC_E_DONE = 0x01
};

struct command_chunk {
	u8 *head;
	dma_addr_t dma_addr;
	u32 size; /* Chunk size, max CPT_INST_CHUNK_MAX_SIZE */
	struct hlist_node nextchunk;
};

struct command_queue {
	spinlock_t lock; /* command queue lock */
	u32 idx; /* Command queue host write idx */
	u32 nchunks; /* Number of command chunks */
	struct command_chunk *qhead;	/* Command queue head, instructions
					 * are inserted here
					 */
	struct hlist_head chead;
};

struct command_qinfo {
	u32 cmd_size;
	u32 qchunksize; /* Command queue chunk size */
	struct command_queue queue[CPT_NUM_QS_PER_VF];
};
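
/*
 * Illustrative sketch only (not part of the original driver): the command
 * queue is assumed to be a chain of DMA-coherent chunks, where each chunk
 * holds @qchunksize instructions of CPT_INST_SIZE bytes followed by
 * CPT_NEXT_CHUNK_PTR_SIZE bytes reserved for the link to the next chunk.
 * The helper name below is hypothetical and only documents that layout.
 */
static inline u32 cptvf_chunk_bytes(u32 qchunksize)
{
	/* instruction area plus the trailing next-chunk pointer */
	return (qchunksize * CPT_INST_SIZE) + CPT_NEXT_CHUNK_PTR_SIZE;
}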

struct pending_entry {
	u8 busy; /* Entry status (free/busy) */

	volatile u64 *completion_addr; /* Completion address */
	void *post_arg;
	void (*callback)(int, void *); /* Kernel ASYNC request callback */
	void *callback_arg; /* Kernel ASYNC request callback arg */
};

struct pending_queue {
	struct pending_entry *head;	/* head of the queue */
	u32 front; /* Process work from here */
	u32 rear; /* Append new work here */
	atomic64_t pending_count;
	spinlock_t lock; /* Queue lock */
};

struct pending_qinfo {
	u32 nr_queues;	/* Number of queues supported */
	u32 qlen; /* Queue length */
	struct pending_queue queue[CPT_NUM_QS_PER_VF];
};

#define for_each_pending_queue(qinfo, q, i)	\
	for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
	     q = &qinfo->queue[i])
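
/*
 * Illustrative sketch only (not part of the original driver): the
 * hypothetical helper below shows the intended iteration pattern of
 * for_each_pending_queue(), visiting queue[0]..queue[nr_queues - 1].
 * Locking is omitted here; real callers would hold each queue's lock
 * before touching its indexes.
 */
static inline void cptvf_reset_pending_indexes(struct pending_qinfo *pqinfo)
{
	struct pending_queue *queue;
	u32 i;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->front = 0;
		queue->rear = 0;
	}
}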

struct cpt_vf {
	u16 flags; /* Flags to hold device status bits */
	u8 vfid; /* Device Index 0...CPT_MAX_VF_NUM */
	u8 vftype; /* VF type of SE_TYPE(1) or AE_TYPE(1) */
	u8 vfgrp; /* VF group (0 - 8) */
	u8 node; /* Operating node: Bits (46:44) in BAR0 address */
	u8 priority; /* VF priority ring: 1-High priority round
		      * robin ring; 0-Low priority round robin ring;
		      */
	struct pci_dev *pdev; /* pci device handle */
	void __iomem *reg_base; /* Register start address */
	void *wqe_info;	/* BH worker info */
	/* MSI-X */
	cpumask_var_t affinity_mask[CPT_VF_MSIX_VECTORS];
	/* Command and Pending queues */
	u32 qsize;
	u32 nr_queues;
	struct command_qinfo cqinfo; /* Command queue information */
	struct pending_qinfo pqinfo; /* Pending queue information */
	/* VF-PF mailbox communication */
	bool pf_acked;
	bool pf_nacked;
};

int cptvf_send_vf_up(struct cpt_vf *cptvf);
int cptvf_send_vf_down(struct cpt_vf *cptvf);
int cptvf_send_vf_to_grp_msg(struct cpt_vf *cptvf);
int cptvf_send_vf_priority_msg(struct cpt_vf *cptvf);
int cptvf_send_vq_size_msg(struct cpt_vf *cptvf);
int cptvf_check_pf_ready(struct cpt_vf *cptvf);
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf);
void cvm_crypto_exit(void);
int cvm_crypto_init(struct cpt_vf *cptvf);
void vq_post_process(struct cpt_vf *cptvf, u32 qno);
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val);
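
/*
 * Illustrative sketch only (not part of the original driver): a rough,
 * hypothetical ordering of the VF->PF mailbox handshake built from the
 * prototypes above. The driver's actual probe path may interleave these
 * calls with queue and interrupt setup; the helper name is an assumption.
 */
static inline int cptvf_mbox_handshake_sketch(struct cpt_vf *cptvf)
{
	int ret;

	ret = cptvf_check_pf_ready(cptvf);	/* wait for PF READY ack */
	if (ret)
		return ret;

	ret = cptvf_send_vq_size_msg(cptvf);	/* advertise VQ length */
	if (ret)
		return ret;

	ret = cptvf_send_vf_to_grp_msg(cptvf);	/* bind VF to its group */
	if (ret)
		return ret;

	ret = cptvf_send_vf_priority_msg(cptvf); /* set ring priority */
	if (ret)
		return ret;

	return cptvf_send_vf_up(cptvf);		/* mark the VF as up */
}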
#endif /* __CPTVF_H */