Lines matching refs: cd (references to struct genwqe_dev *cd in the GenWQE driver, drivers/misc/genwqe/card_base.c)

135 struct genwqe_dev *cd;
144 cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
145 if (!cd)
148 cd->card_idx = i;
149 cd->class_genwqe = &class_genwqe;
150 cd->debugfs_genwqe = debugfs_genwqe;
156 cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;
158 init_waitqueue_head(&cd->queue_waitq);
160 spin_lock_init(&cd->file_lock);
161 INIT_LIST_HEAD(&cd->file_list);
163 cd->card_state = GENWQE_CARD_UNUSED;
164 spin_lock_init(&cd->print_lock);
166 cd->ddcb_software_timeout = GENWQE_DDCB_SOFTWARE_TIMEOUT;
167 cd->kill_timeout = GENWQE_KILL_TIMEOUT;
170 cd->vf_jobtimeout_msec[j] = GENWQE_VF_JOBTIMEOUT_MSEC;
172 genwqe_devices[i] = cd;
173 return cd;
176 static void genwqe_dev_free(struct genwqe_dev *cd)
178 if (!cd)
181 genwqe_devices[cd->card_idx] = NULL;
182 kfree(cd);
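The alloc/free pair above follows a simple pattern: zero-allocate the device structure, fill in defaults, record it in a global slot table indexed by card_idx, and clear that slot again before freeing. A minimal user-space sketch of the slot-table idea, with placeholder names and a placeholder card limit, could look like this:

#include <stdlib.h>

#define MAX_CARDS 16                    /* placeholder limit, not the driver's */

struct dev_stub { int card_idx; };      /* stands in for struct genwqe_dev */

static struct dev_stub *devices[MAX_CARDS];     /* like genwqe_devices[] */

static struct dev_stub *dev_alloc(int i)
{
        struct dev_stub *d = calloc(1, sizeof(*d));     /* kzalloc() analogue */

        if (!d)
                return NULL;
        d->card_idx = i;
        devices[i] = d;                 /* register in the slot table */
        return d;
}

static void dev_free(struct dev_stub *d)
{
        if (!d)
                return;
        devices[d->card_idx] = NULL;    /* clear the slot before freeing */
        free(d);
}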
187 * @cd: GenWQE device information
194 static int genwqe_bus_reset(struct genwqe_dev *cd)
197 struct pci_dev *pci_dev = cd->pci_dev;
200 if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
203 mmio = cd->mmio;
204 cd->mmio = NULL;
227 cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
238 cd->mmio = pci_iomap(pci_dev, 0, 0);
239 if (cd->mmio == NULL) {
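genwqe_bus_reset drops the BAR 0 mapping before resetting the bus and maps it again afterwards, so no MMIO access can happen while the link is down. A hedged kernel-style sketch of that bracket, using a hypothetical helper name and leaving the actual reset step to a callback, is:

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical helper, not driver code: unmap BAR 0, run a caller-supplied
 * reset step, then remap BAR 0, mirroring the pci_iounmap()/pci_iomap()
 * bracket visible in the genwqe_bus_reset listing above. */
static int remap_around_reset(struct pci_dev *pdev, void __iomem **mmio,
                              int (*do_reset)(struct pci_dev *))
{
        int rc;

        pci_iounmap(pdev, *mmio);               /* no MMIO while the bus resets */
        *mmio = NULL;

        rc = do_reset(pdev);
        if (rc)
                return rc;

        *mmio = pci_iomap(pdev, 0, 0);          /* length 0 maps the whole BAR */
        if (*mmio == NULL)
                return -EIO;

        return 0;
}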
259 bool genwqe_need_err_masking(struct genwqe_dev *cd)
261 return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
264 static void genwqe_tweak_hardware(struct genwqe_dev *cd)
266 struct pci_dev *pci_dev = cd->pci_dev;
269 if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
270 ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
273 cd->slu_unitcfg, cd->app_unitcfg);
275 __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
278 __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
285 * @cd: GenWQE device information
292 int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
294 return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
297 int genwqe_flash_readback_fails(struct genwqe_dev *cd)
299 return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
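The three checks above (genwqe_need_err_masking, genwqe_recovery_on_fatal_gfir_required, genwqe_flash_readback_fails) all key off the same bitstream version field of IO_SLU_UNITCFG, masked with 0xFFFF0 and compared against a threshold such as 0x32170. A standalone sketch of that comparison, fed with a made-up sample register value, is:

#include <stdint.h>
#include <stdio.h>

static int need_err_masking(uint64_t slu_unitcfg)
{
        return (slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}

static int recovery_on_fatal_gfir_required(uint64_t slu_unitcfg)
{
        return (slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
}

int main(void)
{
        uint64_t slu_unitcfg = 0x32330ull;      /* hypothetical sample value */

        printf("err masking needed:     %d\n", need_err_masking(slu_unitcfg));
        printf("fatal GFIR recovery ok: %d\n",
               recovery_on_fatal_gfir_required(slu_unitcfg));
        return 0;
}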
304 * @cd: GenWQE device information
314 static int genwqe_T_psec(struct genwqe_dev *cd)
319 speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
328 * @cd: GenWQE device information
336 static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
338 u32 T = genwqe_T_psec(cd);
348 genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
355 * @cd: GenWQE device information
357 static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
359 struct pci_dev *pci_dev = cd->pci_dev;
361 u32 T = genwqe_T_psec(cd);
371 if (cd->vf_jobtimeout_msec[vf] == 0)
374 x = ilog2(cd->vf_jobtimeout_msec[vf] *
377 genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
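genwqe_setup_vf_jtimer skips VFs whose vf_jobtimeout_msec[] entry is zero and encodes the remaining timeouts through ilog2() before writing IO_SLC_VF_APPJOB_TIMEOUT. The kernel's ilog2() returns floor(log2(x)); a standalone model of that encoding, using a placeholder clock-scaling constant rather than the driver's actual formula, is:

#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for v > 0, modelling the kernel's ilog2() */
static unsigned int ilog2_model(uint64_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t timeout_msec = 250;    /* like vf_jobtimeout_msec[vf] */
        uint64_t scale = 1000000;       /* placeholder, not the driver's scaling */
        unsigned int x = ilog2_model(timeout_msec * scale);

        printf("encoded timeout exponent x = %u\n", x);
        return 0;
}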
383 static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
390 e = genwqe_ffdc_buff_size(cd, 0);
393 e = genwqe_ffdc_buff_size(cd, 1);
396 e = genwqe_ffdc_buff_size(cd, 2);
404 cd->ffdc[type].entries = e;
405 cd->ffdc[type].regs =
416 static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
421 kfree(cd->ffdc[type].regs);
422 cd->ffdc[type].regs = NULL;
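genwqe_ffdc_buffs_alloc queries the entry count per dump unit (genwqe_ffdc_buff_size) and allocates a register array for each, and genwqe_ffdc_buffs_free releases the arrays and NULLs the pointers so a later free is harmless. A user-space model of that pairing, with placeholder names, a fake size query, and a made-up record layout, is:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct reg_rec { uint64_t addr, val; };                 /* placeholder record */
struct ffdc_buf { unsigned int entries; struct reg_rec *regs; };

static unsigned int query_size(int type) { return 16u * (type + 1); }   /* fake */

static int ffdc_alloc(struct ffdc_buf *b, int type)
{
        b->entries = query_size(type);
        b->regs = calloc(b->entries, sizeof(*b->regs));
        return b->regs ? 0 : -1;
}

static void ffdc_free(struct ffdc_buf *b)
{
        free(b->regs);
        b->regs = NULL;                 /* NULL after free, as in the listing */
        b->entries = 0;
}

int main(void)
{
        struct ffdc_buf b;

        if (ffdc_alloc(&b, 0) == 0)
                printf("allocated %u reg records\n", b.entries);
        ffdc_free(&b);
        return 0;
}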
426 static int genwqe_read_ids(struct genwqe_dev *cd)
430 struct pci_dev *pci_dev = cd->pci_dev;
432 cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
433 if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
435 "err: SLUID=%016llx\n", cd->slu_unitcfg);
440 slu_id = genwqe_get_slu_id(cd);
448 cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
449 if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
451 "err: APPID=%016llx\n", cd->app_unitcfg);
455 genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));
465 cd->is_privileged = 0;
467 cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
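genwqe_read_ids treats a read of IO_ILLEGAL_VALUE from either SLU or APP unitcfg as "card not reachable". Assuming the usual convention that IO_ILLEGAL_VALUE is the all-ones pattern an absent or dead PCIe device returns, the check reduces to:

#include <stdint.h>
#include <stdio.h>

/* assumption: all-ones is what a read from an absent/dead device returns */
#define IO_ILLEGAL_VALUE 0xffffffffffffffffull

static int id_read_ok(uint64_t reg)
{
        return reg != IO_ILLEGAL_VALUE;
}

int main(void)
{
        printf("valid id:  %d\n", id_read_ok(0x32330ull));
        printf("dead card: %d\n", id_read_ok(IO_ILLEGAL_VALUE));
        return 0;
}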
474 static int genwqe_start(struct genwqe_dev *cd)
477 struct pci_dev *pci_dev = cd->pci_dev;
479 err = genwqe_read_ids(cd);
483 if (genwqe_is_privileged(cd)) {
485 genwqe_ffdc_buffs_alloc(cd);
486 genwqe_stop_traps(cd);
489 genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
490 cd->ffdc[GENWQE_DBG_REGS].entries, 0);
492 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
493 cd->ffdc[GENWQE_DBG_UNIT0].regs,
494 cd->ffdc[GENWQE_DBG_UNIT0].entries);
496 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
497 cd->ffdc[GENWQE_DBG_UNIT1].regs,
498 cd->ffdc[GENWQE_DBG_UNIT1].entries);
500 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
501 cd->ffdc[GENWQE_DBG_UNIT2].regs,
502 cd->ffdc[GENWQE_DBG_UNIT2].entries);
504 genwqe_start_traps(cd);
506 if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
514 cd->softreset = 0x7Cull;
515 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
516 cd->softreset);
518 err = genwqe_bus_reset(cd);
531 err = genwqe_read_ids(cd);
537 err = genwqe_setup_service_layer(cd); /* does a reset to the card */
545 if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */
546 genwqe_tweak_hardware(cd);
548 genwqe_setup_pf_jtimer(cd);
549 genwqe_setup_vf_jtimer(cd);
552 err = genwqe_device_create(cd);
561 genwqe_release_service_layer(cd);
563 if (genwqe_is_privileged(cd))
564 genwqe_ffdc_buffs_free(cd);
570 * @cd: GenWQE device information
581 static int genwqe_stop(struct genwqe_dev *cd)
583 genwqe_finish_queue(cd); /* no register access */
584 genwqe_device_remove(cd); /* device removed, procs killed */
585 genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */
587 if (genwqe_is_privileged(cd)) {
588 pci_disable_sriov(cd->pci_dev); /* access pci config space */
589 genwqe_ffdc_buffs_free(cd);
597 * @cd: GenWQE device information
607 static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
610 struct pci_dev *pci_dev = cd->pci_dev;
612 genwqe_stop(cd);
619 cd->softreset = 0x70ull;
620 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
623 rc = genwqe_bus_reset(cd);
630 rc = genwqe_start(cd);
639 static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
641 *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
643 genwqe_recovery_on_fatal_gfir_required(cd);
648 * @cd: GenWQE device information
656 static u64 genwqe_fir_checking(struct genwqe_dev *cd)
661 struct pci_dev *pci_dev = cd->pci_dev;
671 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
693 fir = __genwqe_readq(cd, fir_addr);
703 fec = __genwqe_readq(cd, fec_addr);
716 sfir = __genwqe_readq(cd, sfir_addr);
724 sfec = __genwqe_readq(cd, sfec_addr);
731 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
747 __genwqe_writeq(cd, sfir_addr, sfir);
764 __genwqe_writeq(cd, fir_clr_addr, mask);
772 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
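genwqe_fir_checking walks the per-unit FIR/FEC and secondary FIR/FEC registers, writes the captured secondary FIR bits back, and writes a mask to a dedicated clear address for the unit FIR. That write-back resembles the common write-one-to-clear register idiom; a small user-space model of that idiom, offered only as an assumption about the hardware semantics rather than a statement of them, is:

#include <stdint.h>
#include <stdio.h>

static uint64_t fir_model = 0x0000000000000101ull;      /* two latched error bits */

static uint64_t fir_read(void)      { return fir_model; }
static void fir_write(uint64_t v)   { fir_model &= ~v; } /* W1C: written 1s clear */

int main(void)
{
        uint64_t fir = fir_read();

        printf("FIR before clear: 0x%016llx\n", (unsigned long long)fir);
        fir_write(fir);                 /* write the captured bits back */
        printf("FIR after clear:  0x%016llx\n", (unsigned long long)fir_read());
        return 0;
}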
824 static int genwqe_platform_recovery(struct genwqe_dev *cd)
826 struct pci_dev *pci_dev = cd->pci_dev;
833 cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
837 genwqe_stop(cd);
842 rc = genwqe_start(cd);
860 * @cd: GenWQE device information
867 static int genwqe_reload_bistream(struct genwqe_dev *cd)
869 struct pci_dev *pci_dev = cd->pci_dev;
876 genwqe_stop(cd);
882 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
883 (cd->softreset & 0xcull) | 0x70ull);
898 rc = genwqe_start(cd);
934 struct genwqe_dev *cd = data;
935 struct pci_dev *pci_dev = cd->pci_dev;
940 rc = wait_event_interruptible_timeout(cd->health_waitq,
941 (genwqe_health_check_cond(cd, &gfir) ||
954 slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
962 app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
970 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
979 gfir_masked = genwqe_fir_checking(cd);
987 if ((gfir_masked) && !cd->skip_recovery &&
988 genwqe_recovery_on_fatal_gfir_required(cd)) {
990 cd->card_state = GENWQE_CARD_FATAL_ERROR;
992 rc = genwqe_recover_card(cd, 0);
999 if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) {
1001 rc = genwqe_reload_bistream(cd);
1006 cd->last_gfir = gfir;
1013 if (cd->use_platform_recovery) {
1019 readq(cd->mmio + IO_SLC_CFGREG_GFIR);
1030 rc = genwqe_platform_recovery(cd);
1039 cd->card_state = GENWQE_CARD_FATAL_ERROR;
1040 genwqe_stop(cd);
1049 static int genwqe_health_check_start(struct genwqe_dev *cd)
1057 /* init_waitqueue_head(&cd->health_waitq); */
1059 cd->health_thread = kthread_run(genwqe_health_thread, cd,
1061 cd->card_idx);
1062 if (IS_ERR(cd->health_thread)) {
1063 rc = PTR_ERR(cd->health_thread);
1064 cd->health_thread = NULL;
1070 static int genwqe_health_thread_running(struct genwqe_dev *cd)
1072 return cd->health_thread != NULL;
1075 static int genwqe_health_check_stop(struct genwqe_dev *cd)
1077 if (!genwqe_health_thread_running(cd))
1080 kthread_stop(cd->health_thread);
1081 cd->health_thread = NULL;
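The health thread is started with kthread_run() and torn down with kthread_stop(); the NULL check in genwqe_health_thread_running is what makes the stop path safe when the thread was never started. A hedged kernel-style sketch of that lifecycle, with placeholder names and a plain sleep standing in for the driver's wait_event_interruptible_timeout() on health_waitq, is:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *health_task;         /* analogue of cd->health_thread */

static int health_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* the real thread waits on health_waitq with a timeout and
                 * then runs the GFIR/FIR checks; here we only sleep */
                msleep_interruptible(1000);
        }
        return 0;
}

static int health_start(void)
{
        health_task = kthread_run(health_fn, NULL, "health_thread");
        if (IS_ERR(health_task)) {
                int rc = PTR_ERR(health_task);

                health_task = NULL;
                return rc;
        }
        return 0;
}

static void health_stop(void)
{
        if (!health_task)
                return;
        kthread_stop(health_task);
        health_task = NULL;
}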
1087 * @cd: GenWQE device information
1089 static int genwqe_pci_setup(struct genwqe_dev *cd)
1092 struct pci_dev *pci_dev = cd->pci_dev;
1126 cd->mmio_len = pci_resource_len(pci_dev, 0);
1127 cd->mmio = pci_iomap(pci_dev, 0, 0);
1128 if (cd->mmio == NULL) {
1135 cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
1136 if (cd->num_vfs < 0)
1137 cd->num_vfs = 0;
1139 err = genwqe_read_ids(cd);
1146 pci_iounmap(pci_dev, cd->mmio);
1157 * @cd: GenWQE device information
1159 static void genwqe_pci_remove(struct genwqe_dev *cd)
1161 struct pci_dev *pci_dev = cd->pci_dev;
1163 if (cd->mmio)
1164 pci_iounmap(pci_dev, cd->mmio);
1183 struct genwqe_dev *cd;
1187 cd = genwqe_dev_alloc();
1188 if (IS_ERR(cd)) {
1190 (int)PTR_ERR(cd));
1191 return PTR_ERR(cd);
1194 dev_set_drvdata(&pci_dev->dev, cd);
1195 cd->pci_dev = pci_dev;
1197 err = genwqe_pci_setup(cd);
1204 err = genwqe_start(cd);
1211 if (genwqe_is_privileged(cd)) {
1212 err = genwqe_health_check_start(cd);
1223 genwqe_stop(cd);
1225 genwqe_pci_remove(cd);
1227 genwqe_dev_free(cd);
1239 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1241 genwqe_health_check_stop(cd);
1248 genwqe_stop(cd);
1249 genwqe_pci_remove(cd);
1250 genwqe_dev_free(cd);
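Probe stores the card structure with dev_set_drvdata(), and every later entry point in the listing (remove, the PCIe error handlers, sriov_configure) recovers it with dev_get_drvdata(). A minimal kernel-style sketch of that pairing, with hypothetical names, is:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct card_stub { int card_idx; };     /* stands in for struct genwqe_dev */

static int stub_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
        struct card_stub *cd = kzalloc(sizeof(*cd), GFP_KERNEL);

        if (!cd)
                return -ENOMEM;
        dev_set_drvdata(&pci_dev->dev, cd);     /* make cd reachable from pci_dev */
        return 0;
}

static void stub_remove(struct pci_dev *pci_dev)
{
        struct card_stub *cd = dev_get_drvdata(&pci_dev->dev);

        kfree(cd);                              /* same object stored in probe */
}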
1264 struct genwqe_dev *cd;
1268 cd = dev_get_drvdata(&pci_dev->dev);
1269 if (cd == NULL)
1273 genwqe_health_check_stop(cd);
1274 genwqe_stop(cd);
1284 genwqe_pci_remove(cd);
1292 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1294 rc = genwqe_pci_setup(cd);
1312 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1314 rc = genwqe_start(cd);
1316 rc = genwqe_health_check_start(cd);
1330 struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
1333 genwqe_setup_vf_jtimer(cd);