1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License:  Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 *  * Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 *
19 *  * Redistributions in binary form must reproduce the above
20 *    copyright notice, this list of conditions and the following
21 *    disclaimer in the documentation and/or other materials provided
22 *    with the distribution.
23 *
24 *  * Neither the name of Cavium Inc. nor the names of its contributors may be
25 *    used to endorse or promote products derived from this software without
26 *    specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include "common.h"
47#include "zip_crypto.h"
48
49#define DRV_NAME		"ThunderX-ZIP"
50
51static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
52
53static const struct pci_device_id zip_id_table[] = {
54	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
55	{ 0, }
56};
57
58static void zip_debugfs_init(void);
59static void zip_debugfs_exit(void);
60static int zip_register_compression_device(void);
61static void zip_unregister_compression_device(void);
62
/* Write a 64-bit value to a memory-mapped ZIP CSR at @addr. */
void zip_reg_write(u64 val, u64 __iomem *addr)
{
	writeq(val, addr);
}
67
/* Read a 64-bit value from a memory-mapped ZIP CSR at @addr. */
u64 zip_reg_read(u64 __iomem *addr)
{
	return readq(addr);
}
72
73/*
74 * Allocates new ZIP device structure
75 * Returns zip_device pointer or NULL if cannot allocate memory for zip_device
76 */
77static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
78{
79	struct zip_device *zip = NULL;
80	int idx;
81
82	for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
83		if (!zip_dev[idx])
84			break;
85	}
86
87	/* To ensure that the index is within the limit */
88	if (idx < MAX_ZIP_DEVICES)
89		zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
90
91	if (!zip)
92		return NULL;
93
94	zip_dev[idx] = zip;
95	zip->index = idx;
96	return zip;
97}
98
99/**
100 * zip_get_device - Get ZIP device based on node id of cpu
101 *
102 * @node: Node id of the current cpu
103 * Return: Pointer to Zip device structure
104 */
105struct zip_device *zip_get_device(int node)
106{
107	if ((node < MAX_ZIP_DEVICES) && (node >= 0))
108		return zip_dev[node];
109
110	zip_err("ZIP device not found for node id %d\n", node);
111	return NULL;
112}
113
114/**
115 * zip_get_node_id - Get the node id of the current cpu
116 *
117 * Return: Node id of the current cpu
118 */
119int zip_get_node_id(void)
120{
121	return cpu_to_node(raw_smp_processor_id());
122}
123
/* Initializes the ZIP h/w sub-system */
static int zip_init_hw(struct zip_device *zip)
{
	union zip_cmd_ctl    cmd_ctl;
	union zip_constants  constants;
	union zip_que_ena    que_ena;
	union zip_quex_map   que_map;
	union zip_que_pri    que_pri;

	union zip_quex_sbuf_addr que_sbuf_addr;
	union zip_quex_sbuf_ctl  que_sbuf_ctl;

	int q = 0;

	/* Enable the ZIP Engine(Core) Clock */
	cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
	cmd_ctl.s.forceclk = 1;
	/*
	 * NOTE(review): only the low byte of CMD_CTL is written back here
	 * (upper bits are masked off) — presumably the remaining bits are
	 * reserved or must-be-zero on write; confirm against the HRM.
	 */
	zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));

	zip_msg("ZIP_CMD_CTL  : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_CMD_CTL));

	/* Cache the hardware capability constants for later use */
	constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
	zip->depth    = constants.s.depth;
	zip->onfsize  = constants.s.onfsize;
	zip->ctxsize  = constants.s.ctxsize;

	zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
		zip->depth, zip->onfsize, zip->ctxsize);

	/*
	 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
	 * have the correct buffer pointer and size configured for each
	 * instruction queue.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_sbuf_ctl.u_reg64 = 0ull;
		/* Size is expressed in 64-bit instruction words */
		que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
		que_sbuf_ctl.s.inst_be   = 0;
		que_sbuf_ctl.s.stream_id = 0;
		zip_reg_write(que_sbuf_ctl.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

		zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
	}

	/* Allocate and publish the command buffer for each queue */
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));

		spin_lock_init(&zip->iq[q].lock);

		if (zip_cmd_qbuf_alloc(zip, q)) {
			/* Unwind buffers already allocated for lower queues */
			while (q != 0) {
				q--;
				zip_cmd_qbuf_free(zip, q);
			}
			return -ENOMEM;
		}

		/* Initialize tail ptr to head */
		zip->iq[q].sw_tail = zip->iq[q].sw_head;
		zip->iq[q].hw_tail = zip->iq[q].sw_head;

		/* Write the physical addr to register */
		que_sbuf_addr.u_reg64   = 0ull;
		/* HW takes a 128-byte-aligned physical pointer (shifted) */
		que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
				       ZIP_128B_ALIGN);

		zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
			(u64)que_sbuf_addr.s.ptr);

		zip_reg_write(que_sbuf_addr.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
			zip->iq[q].sw_head, zip->iq[q].sw_tail,
			zip->iq[q].hw_tail);
		zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
	}

	/*
	 * Queue-to-ZIP core mapping
	 * If a queue is not mapped to a particular core, it is equivalent to
	 * the ZIP core being disabled.
	 */
	que_ena.u_reg64 = 0x0ull;
	/* Enabling queues based on ZIP_NUM_QUEUES */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_ena.s.ena |= (0x1 << q);
	zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));

	zip_msg("QUE_ENA      : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_QUE_ENA));

	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_map.u_reg64 = 0ull;
		/* Mapping each queue to two ZIP cores */
		que_map.s.zce = 0x3;
		zip_reg_write(que_map.u_reg64,
			      (zip->reg_base + ZIP_QUEX_MAP(q)));

		zip_msg("QUE_MAP(%d)   : 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
	}

	/* Give every queue the same (high) round-robin priority */
	que_pri.u_reg64 = 0ull;
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
	zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));

	zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));

	return 0;
}
242
/* Reset the ZIP cores by writing the reset bit of ZIP_CMD_CTL. */
static void zip_reset(struct zip_device *zip)
{
	union zip_cmd_ctl cmd_ctl;

	cmd_ctl.u_reg64 = 0x0ull;
	cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
	zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
}
251
252static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
253{
254	struct device *dev = &pdev->dev;
255	struct zip_device *zip = NULL;
256	int    err;
257
258	zip = zip_alloc_device(pdev);
259	if (!zip)
260		return -ENOMEM;
261
262	dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
263		 pdev->vendor, pdev->device, dev_to_node(dev));
264
265	pci_set_drvdata(pdev, zip);
266	zip->pdev = pdev;
267
268	err = pci_enable_device(pdev);
269	if (err) {
270		dev_err(dev, "Failed to enable PCI device");
271		goto err_free_device;
272	}
273
274	err = pci_request_regions(pdev, DRV_NAME);
275	if (err) {
276		dev_err(dev, "PCI request regions failed 0x%x", err);
277		goto err_disable_device;
278	}
279
280	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
281	if (err) {
282		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
283		goto err_release_regions;
284	}
285
286	/* MAP configuration registers */
287	zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
288	if (!zip->reg_base) {
289		dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
290		err = -ENOMEM;
291		goto err_release_regions;
292	}
293
294	/* Initialize ZIP Hardware */
295	err = zip_init_hw(zip);
296	if (err)
297		goto err_release_regions;
298
299	/* Register with the Kernel Crypto Interface */
300	err = zip_register_compression_device();
301	if (err < 0) {
302		zip_err("ZIP: Kernel Crypto Registration failed\n");
303		goto err_register;
304	}
305
306	/* comp-decomp statistics are handled with debugfs interface */
307	zip_debugfs_init();
308
309	return 0;
310
311err_register:
312	zip_reset(zip);
313
314err_release_regions:
315	if (zip->reg_base)
316		iounmap(zip->reg_base);
317	pci_release_regions(pdev);
318
319err_disable_device:
320	pci_disable_device(pdev);
321
322err_free_device:
323	pci_set_drvdata(pdev, NULL);
324
325	/* Remove zip_dev from zip_device list, free the zip_device memory */
326	zip_dev[zip->index] = NULL;
327	devm_kfree(dev, zip);
328
329	return err;
330}
331
/*
 * PCI remove: tear down debugfs, unregister the crypto algorithms, reset
 * the hardware, and release all PCI and queue-buffer resources.
 */
static void zip_remove(struct pci_dev *pdev)
{
	struct zip_device *zip = pci_get_drvdata(pdev);
	int q = 0;

	if (!zip)
		return;

	zip_debugfs_exit();

	zip_unregister_compression_device();

	/* Quiesce the cores before unmapping their CSRs */
	if (zip->reg_base) {
		zip_reset(zip);
		iounmap(zip->reg_base);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/*
	 * Free Command Queue buffers. This free should be called for all
	 * the enabled Queues.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		zip_cmd_qbuf_free(zip, q);

	pci_set_drvdata(pdev, NULL);
	/* remove zip device from zip device list */
	zip_dev[zip->index] = NULL;
}
363
/* PCI Sub-System Interface: binds zip_probe/zip_remove to zip_id_table */
static struct pci_driver zip_driver = {
	.name	    =  DRV_NAME,
	.id_table   =  zip_id_table,
	.probe	    =  zip_probe,
	.remove     =  zip_remove,
};
371
372/* Kernel Crypto Subsystem Interface */
373
/* "deflate" via the legacy compression (cra_u.compress) crypto interface */
static struct crypto_alg zip_comp_deflate = {
	.cra_name		= "deflate",
	.cra_driver_name	= "deflate-cavium",
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
	.cra_priority           = 300,
	.cra_module		= THIS_MODULE,
	.cra_init		= zip_alloc_comp_ctx_deflate,
	.cra_exit		= zip_free_comp_ctx,
	.cra_u			= { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
		 } }
};
388
/* "lzs" via the legacy compression (cra_u.compress) crypto interface */
static struct crypto_alg zip_comp_lzs = {
	.cra_name		= "lzs",
	.cra_driver_name	= "lzs-cavium",
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
	.cra_priority           = 300,
	.cra_module		= THIS_MODULE,
	.cra_init		= zip_alloc_comp_ctx_lzs,
	.cra_exit		= zip_free_comp_ctx,
	.cra_u			= { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
		 } }
};
403
/* "deflate" via the synchronous compression (scomp) crypto interface */
static struct scomp_alg zip_scomp_deflate = {
	.alloc_ctx		= zip_alloc_scomp_ctx_deflate,
	.free_ctx		= zip_free_scomp_ctx,
	.compress		= zip_scomp_compress,
	.decompress		= zip_scomp_decompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority           = 300,
	}
};
416
/* "lzs" via the synchronous compression (scomp) crypto interface */
static struct scomp_alg zip_scomp_lzs = {
	.alloc_ctx		= zip_alloc_scomp_ctx_lzs,
	.free_ctx		= zip_free_scomp_ctx,
	.compress		= zip_scomp_compress,
	.decompress		= zip_scomp_decompress,
	.base			= {
		.cra_name		= "lzs",
		.cra_driver_name	= "lzs-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority           = 300,
	}
};
429
430static int zip_register_compression_device(void)
431{
432	int ret;
433
434	ret = crypto_register_alg(&zip_comp_deflate);
435	if (ret < 0) {
436		zip_err("Deflate algorithm registration failed\n");
437		return ret;
438	}
439
440	ret = crypto_register_alg(&zip_comp_lzs);
441	if (ret < 0) {
442		zip_err("LZS algorithm registration failed\n");
443		goto err_unregister_alg_deflate;
444	}
445
446	ret = crypto_register_scomp(&zip_scomp_deflate);
447	if (ret < 0) {
448		zip_err("Deflate scomp algorithm registration failed\n");
449		goto err_unregister_alg_lzs;
450	}
451
452	ret = crypto_register_scomp(&zip_scomp_lzs);
453	if (ret < 0) {
454		zip_err("LZS scomp algorithm registration failed\n");
455		goto err_unregister_scomp_deflate;
456	}
457
458	return ret;
459
460err_unregister_scomp_deflate:
461	crypto_unregister_scomp(&zip_scomp_deflate);
462err_unregister_alg_lzs:
463	crypto_unregister_alg(&zip_comp_lzs);
464err_unregister_alg_deflate:
465	crypto_unregister_alg(&zip_comp_deflate);
466
467	return ret;
468}
469
/* Unregister all four algorithms registered by
 * zip_register_compression_device() (legacy algs first, then scomp).
 */
static void zip_unregister_compression_device(void)
{
	crypto_unregister_alg(&zip_comp_deflate);
	crypto_unregister_alg(&zip_comp_lzs);
	crypto_unregister_scomp(&zip_scomp_deflate);
	crypto_unregister_scomp(&zip_scomp_lzs);
}
477
478/*
479 * debugfs functions
480 */
481#ifdef CONFIG_DEBUG_FS
482#include <linux/debugfs.h>
483
/* Displays ZIP device statistics */
static int zip_stats_show(struct seq_file *s, void *unused)
{
	u64 val = 0ull;
	u64 avg_chunk = 0ull, avg_cr = 0ull;
	u32 q = 0;

	int index  = 0;
	struct zip_device *zip;
	struct zip_stats  *st;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		u64 pending = 0;

		if (zip_dev[index]) {
			zip = zip_dev[index];
			st  = &zip->stats;

			/* Get all the pending requests */
			for (q = 0; q < ZIP_NUM_QUEUES; q++) {
				val = zip_reg_read((zip->reg_base +
						    ZIP_DBG_QUEX_STA(q)));
				/*
				 * NOTE(review): assumes the pending-instruction
				 * count lives in bits [55:32] of
				 * ZIP_DBG_QUEX_STA — confirm against the HRM.
				 */
				pending += val >> 32 & 0xffffff;
			}

			/* Averages guard against division by zero */
			val = atomic64_read(&st->comp_req_complete);
			avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

			val = atomic64_read(&st->comp_out_bytes);
			avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
			seq_printf(s, "        ZIP Device %d Stats\n"
				      "-----------------------------------\n"
				      "Comp Req Submitted        : \t%lld\n"
				      "Comp Req Completed        : \t%lld\n"
				      "Compress In Bytes         : \t%lld\n"
				      "Compressed Out Bytes      : \t%lld\n"
				      "Average Chunk size        : \t%llu\n"
				      "Average Compression ratio : \t%llu\n"
				      "Decomp Req Submitted      : \t%lld\n"
				      "Decomp Req Completed      : \t%lld\n"
				      "Decompress In Bytes       : \t%lld\n"
				      "Decompressed Out Bytes    : \t%lld\n"
				      "Decompress Bad requests   : \t%lld\n"
				      "Pending Req               : \t%lld\n"
					"---------------------------------\n",
				       index,
				       (u64)atomic64_read(&st->comp_req_submit),
				       (u64)atomic64_read(&st->comp_req_complete),
				       (u64)atomic64_read(&st->comp_in_bytes),
				       (u64)atomic64_read(&st->comp_out_bytes),
				       avg_chunk,
				       avg_cr,
				       (u64)atomic64_read(&st->decomp_req_submit),
				       (u64)atomic64_read(&st->decomp_req_complete),
				       (u64)atomic64_read(&st->decomp_in_bytes),
				       (u64)atomic64_read(&st->decomp_out_bytes),
				       (u64)atomic64_read(&st->decomp_bad_reqs),
				       pending);
		}
	}
	return 0;
}
546
547/* Clears stats data */
548static int zip_clear_show(struct seq_file *s, void *unused)
549{
550	int index = 0;
551
552	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
553		if (zip_dev[index]) {
554			memset(&zip_dev[index]->stats, 0,
555			       sizeof(struct zip_stats));
556			seq_printf(s, "Cleared stats for zip %d\n", index);
557		}
558	}
559
560	return 0;
561}
562
/* Name/offset table of CSRs dumped by zip_regs_show(); NULL-terminated */
static struct zip_registers zipregs[64] = {
	{"ZIP_CMD_CTL        ",  0x0000ull},
	{"ZIP_THROTTLE       ",  0x0010ull},
	{"ZIP_CONSTANTS      ",  0x00A0ull},
	{"ZIP_QUE0_MAP       ",  0x1400ull},
	{"ZIP_QUE1_MAP       ",  0x1408ull},
	{"ZIP_QUE_ENA        ",  0x0500ull},
	{"ZIP_QUE_PRI        ",  0x0508ull},
	{"ZIP_QUE0_DONE      ",  0x2000ull},
	{"ZIP_QUE1_DONE      ",  0x2008ull},
	{"ZIP_QUE0_DOORBELL  ",  0x4000ull},
	{"ZIP_QUE1_DOORBELL  ",  0x4008ull},
	{"ZIP_QUE0_SBUF_ADDR ",  0x1000ull},
	{"ZIP_QUE1_SBUF_ADDR ",  0x1008ull},
	{"ZIP_QUE0_SBUF_CTL  ",  0x1200ull},
	{"ZIP_QUE1_SBUF_CTL  ",  0x1208ull},
	{ NULL, 0}
};
581
582/* Prints registers' contents */
583static int zip_regs_show(struct seq_file *s, void *unused)
584{
585	u64 val = 0;
586	int i = 0, index = 0;
587
588	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
589		if (zip_dev[index]) {
590			seq_printf(s, "--------------------------------\n"
591				      "     ZIP Device %d Registers\n"
592				      "--------------------------------\n",
593				      index);
594
595			i = 0;
596
597			while (zipregs[i].reg_name) {
598				val = zip_reg_read((zip_dev[index]->reg_base +
599						    zipregs[i].reg_offset));
600				seq_printf(s, "%s: 0x%016llx\n",
601					   zipregs[i].reg_name, val);
602				i++;
603			}
604		}
605	}
606	return 0;
607}
608
/* seq_file boilerplate: generates zip_{stats,clear,regs}_fops */
DEFINE_SHOW_ATTRIBUTE(zip_stats);
DEFINE_SHOW_ATTRIBUTE(zip_clear);
DEFINE_SHOW_ATTRIBUTE(zip_regs);

/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
615
616static void zip_debugfs_init(void)
617{
618	if (!debugfs_initialized())
619		return;
620
621	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
622
623	/* Creating files for entries inside thunderx_zip directory */
624	debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
625			    &zip_stats_fops);
626
627	debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
628			    &zip_clear_fops);
629
630	debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
631			    &zip_regs_fops);
632
633}
634
/* Remove the whole thunderx_zip debugfs tree (safe on NULL root). */
static void zip_debugfs_exit(void)
{
	debugfs_remove_recursive(zip_debugfs_root);
}
639
640#else
641static void __init zip_debugfs_init(void) { }
642static void __exit zip_debugfs_exit(void) { }
643#endif
644/* debugfs - end */
645
/* Generates module init/exit that register/unregister zip_driver */
module_pci_driver(zip_driver);

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);
652