Searched refs:dq (Results 1 - 20 of 20) sorted by relevance

/u-boot/drivers/ddr/marvell/axp/
ddr3_pbs.c
45 /* PBS locked dq (per pup) */
97 u32 pup, dq, pups, cur_max_pup, valid_pup, reg; local
126 for (dq = 0; dq < DQ_NUM; dq++)
127 skew_sum_array[pup][dq] = 0;
174 for (dq = 0; dq < DQ_NUM; dq++) {
177 (max_pup - 1)][dq]
539 u32 pup, dq, pups, cur_max_pup, valid_pup, reg; local
1090 u32 dq; local
1149 u32 pup, dq; local
1419 u32 pup, phys_pup, dq; local
[all...]
ddr3_dqs.c
313 u32 dq; local
347 for (dq = 0; dq < DQ_NUM; dq++) {
348 analog_pbs_sum[pup][dq][0] = adll_start_val;
349 analog_pbs_sum[pup][dq][1] = adll_end_val;
379 for (dq = 0; dq < DQ_NUM; dq++) {
380 analog_pbs[victim_dq][pup][dq][
708 u32 dq; local
[all...]
ddr3_sdram.c
27 /* PBS locked dq (per pup) */
98 __maybe_unused u32 dq; local
114 for (dq = 0; dq < DQ_NUM; dq++) {
117 if (((var1 >> dq) & 0x1) !=
118 ((var2 >> dq) & 0x1))
119 per_bit_data[val][dq] = 1;
121 per_bit_data[val][dq] = 0;
171 __maybe_unused u32 dq; local
291 u32 ui, dq, pup; local
[all...]
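
The ddr3_sdram.c hits above compare two data words bit by bit and record which DQ lanes mismatched. Below is a minimal standalone sketch of that per-bit error bookkeeping; the function name record_dq_errors and the test values are illustrative, only the compare-and-mark loop mirrors the hit at lines 114-121.

#include <stdio.h>
#include <stdint.h>

#define DQ_NUM 8	/* DQ bits per byte lane, as in the Marvell AXP code */

/* Mark each DQ lane whose read-back bit differs from the expected bit. */
static void record_dq_errors(uint32_t expected, uint32_t actual,
			     uint8_t per_bit_data[DQ_NUM])
{
	unsigned int dq;

	for (dq = 0; dq < DQ_NUM; dq++) {
		if (((expected >> dq) & 0x1) != ((actual >> dq) & 0x1))
			per_bit_data[dq] = 1;
		else
			per_bit_data[dq] = 0;
	}
}

int main(void)
{
	uint8_t per_bit_data[DQ_NUM];
	unsigned int dq;

	record_dq_errors(0xA5, 0xA1, per_bit_data);	/* only bit 2 differs */
	for (dq = 0; dq < DQ_NUM; dq++)
		printf("dq%u: %s\n", dq, per_bit_data[dq] ? "FAIL" : "ok");

	return 0;
}
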
ddr3_hw_training.c
699 u32 val, pup, tmp_cs, cs, i, dq; local
740 for (dq = 0; dq <= DQ_NUM;
741 dq++) {
743 mode_config[i] + dq,
/u-boot/arch/mips/mach-octeon/include/mach/
cvmx-pko3.h
418 u64 dq : 10; member in struct:cvmx_pko_lmtdma_data::__anon47
440 * @param dq dq
443 unsigned int dq)
455 pparam += dq;
462 static inline void cvmx_pko3_dq_set_limit(unsigned int node, unsigned int dq, argument
467 pparam = cvmx_pko3_dq_parameters(node, dq);
603 * @param dq is the destination descriptor queue.
613 __cvmx_pko3_lmtdma(u8 node, uint16_t dq, unsigned int numwords, bool tag_wait) argument
629 pparam = cvmx_pko3_dq_parameters(node, dq);
442 cvmx_pko3_dq_parameters(unsigned int node, unsigned int dq) argument
716 __cvmx_pko3_do_dma(u8 node, uint16_t dq, u64 cmds[], unsigned int numwords, enum cvmx_pko_dqop dqop) argument
837 cvmx_pko3_xmit_link_buf(int dq, cvmx_buf_ptr_pki_t pki_ptr, unsigned int len, int gaura, u64 *pcounter, u32 *ptag) argument
[all...]
cvmx-pko.h
12 cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
cvmx-pko-defs.h
567 u64 dq : 10; member in struct:cvmx_pko_dqx_pick::cvmx_pko_dqx_pick_s
1164 u64 dq : 10; member in struct:cvmx_pko_l1_sqx_pick::cvmx_pko_l1_sqx_pick_s
1552 u64 dq : 10; member in struct:cvmx_pko_l2_sqx_pick::cvmx_pko_l2_sqx_pick_s
1953 u64 dq : 10; member in struct:cvmx_pko_l3_sqx_pick::cvmx_pko_l3_sqx_pick_s
2335 u64 dq : 10; member in struct:cvmx_pko_l4_sqx_pick::cvmx_pko_l4_sqx_pick_s
2660 u64 dq : 10; member in struct:cvmx_pko_l5_sqx_pick::cvmx_pko_l5_sqx_pick_s
cvmx-lmcx-defs.h
1260 * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.
1277 * At the same time though, dq/dqs should be delayed because there is also
1288 uint64_t dq:5; member in struct:cvmx_lmcx_delay_cfg::cvmx_lmcx_delay_cfg_s
1295 uint64_t dq:4; member in struct:cvmx_lmcx_delay_cfg::cvmx_lmcx_delay_cfg_cn38xx
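
cvmx-lmcx-defs.h models these CSRs as unions of a raw u64 and a bitfield struct; LMC DELAY_CFG carries a 5-bit dq delay (4-bit on CN38XX). The sketch below only shows that union-plus-bitfield access idiom: the type name, field placement, and neighbouring reserved bits are made up here, only the 5-bit dq width comes from the hits above.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the real cvmx_lmcx_delay_cfg layout has more fields
 * and a device-defined bit order. */
typedef union {
	uint64_t u64;
	struct {
		uint64_t dq:5;		/* outgoing delay on dq/dqs/dqm pads */
		uint64_t reserved:59;	/* placeholder for the other fields */
	} s;
} fake_lmcx_delay_cfg_t;

int main(void)
{
	fake_lmcx_delay_cfg_t cfg;

	cfg.u64 = 0;		/* would normally be read back from the CSR */
	cfg.s.dq = 0x12;	/* program a 5-bit dq delay code */
	printf("raw value to write back: 0x%016llx\n",
	       (unsigned long long)cfg.u64);
	return 0;
}
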
/u-boot/include/fsl-mc/
fsl_dpaa_fd.h
100 static inline int ldpaa_dq_is_pull(const struct ldpaa_dq *dq) argument
102 return (int)(ldpaa_dq_flags(dq) & LDPAA_DQ_STAT_VOLATILE);
105 const struct ldpaa_dq *dq)
107 return (int)(ldpaa_dq_flags(dq) & LDPAA_DQ_STAT_EXPIRED);
104 ldpaa_dq_is_pull_complete( const struct ldpaa_dq *dq) argument
/u-boot/include/crypto/internal/
rsa.h
20 * @dq : RSA exponent d mod (q - 1) raw byte stream
28 * @dq_sz : length in bytes of dq field
38 const u8 *dq; member in struct:rsa_key
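
The dq member of struct rsa_key is the CRT exponent d mod (q - 1). A toy, self-contained illustration of how dq is derived and used in CRT decryption follows; it uses small hard-coded primes and its own modpow helper, nothing from the u-boot crypto code itself.

#include <stdio.h>
#include <stdint.h>

/* Modular exponentiation on small numbers, enough for a toy RSA example. */
static uint64_t modpow(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = (result * base) % mod;
		base = (base * base) % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	/* Toy key: p = 11, q = 13, n = 143, e = 7, d = 103 (7*103 = 1 mod 120) */
	uint64_t p = 11, q = 13, d = 103, c = 48;	/* c = 9^7 mod 143 */
	uint64_t dp = d % (p - 1);			/* 3 */
	uint64_t dq = d % (q - 1);			/* 7, the rsa_key 'dq' */
	uint64_t qinv = 6;				/* q^-1 mod p, precomputed */

	uint64_t m1 = modpow(c, dp, p);			/* c^dp mod p */
	uint64_t m2 = modpow(c, dq, q);			/* c^dq mod q */
	uint64_t h = (qinv * ((m1 + p - m2 % p) % p)) % p;
	uint64_t m = m2 + h * q;			/* recombine: m = 9 */

	printf("recovered plaintext: %llu\n", (unsigned long long)m);
	return 0;
}
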
/u-boot/drivers/net/fsl-mc/dpio/
qbman_portal.c
377 const struct ldpaa_dq *dq; local
380 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
381 p = qb_cl(dq);
407 flags = ldpaa_dq_flags(dq);
416 return dq;
420 void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct ldpaa_dq *dq) argument
422 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
429 void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *dq, argument
433 memset(dq, oldtoken, num_entries * sizeof(*dq));
436 qbman_dq_entry_has_newtoken(struct qbman_swp *s, const struct ldpaa_dq *dq, uint8_t newtoken) argument
484 __qbman_dq_entry_is_x(const struct ldpaa_dq *dq, uint32_t x) argument
492 qbman_dq_entry_is_DQ(const struct ldpaa_dq *dq) argument
503 ldpaa_dq_flags(const struct ldpaa_dq *dq) argument
510 ldpaa_dq_fd(const struct ldpaa_dq *dq) argument
[all...]
/u-boot/arch/mips/mach-octeon/
cvmx-helper-pko3.c
173 unsigned int chan, dq; local
298 dq = res;
300 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0) && (dq & 7))
302 __func__, dq);
305 dq, num_queues, prio);
310 res = __cvmx_pko3_ipd_dq_register(xiface, chan, dq, num_queues);
494 int dq; local
603 dq = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_queues);
604 if (dq < 0) {
609 res = cvmx_pko3_sq_config_children(node, level, parent_q, dq,
645 const int dq = 0; /* Reserve DQ#0 for NULL */ local
[all...]
cvmx-pko3.c
234 * @param dq is the descriptor queue number to be opened.
237 int cvmx_pko_dq_open(int node, int dq) argument
244 debug("%s: DEBUG: dq %u\n", __func__, dq);
248 pko_status = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);
255 cvmx_printf("%s: ERROR: Failed to open dq :%u: %s\n", __func__,
256 dq, pko_dqstatus_error(dqstatus));
261 p_param = cvmx_pko3_dq_parameters(node, dq);
775 * @param dq descriptor queue to set
776 * @param min_pad minimum padding to set for dq
778 cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad) argument
[all...]
cvmx-pko3-queue.c
125 * Initialize port/dq table contents
138 * Find or allocate global port/dq map table
228 debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
616 * @param dq is the descriptor queue number to be configured.
617 * @param parent_queue is the parent queue at next level for this dq.
623 static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio, argument
635 debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
663 /* scheduler configuration for this dq in the parent queue */
667 csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sche
[all...]
cvmx-pko3-compat.c
282 cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command, argument
300 /* Derive destination node from dq */
301 desc.port_node = dq >> 10;
302 dq &= (1 << 10) - 1;
634 __cvmx_pko3_lmtdma(desc.port_node, dq, desc.word_count, tag_sw);
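
The compat shim above derives the destination node from the bits above the 10-bit descriptor-queue number (matching the u64 dq : 10 bitfields elsewhere in these results). A tiny standalone sketch of that encode/decode; the pko3_dq_handle helper name is invented for illustration, only the shift/mask mirrors the hit at lines 300-302.

#include <stdio.h>

#define PKO3_DQ_BITS	10	/* dq fields in the PKO CSRs are 10 bits wide */

/* Pack a node id and a per-node descriptor queue into one handle. */
static unsigned int pko3_dq_handle(unsigned int node, unsigned int dq)
{
	return (node << PKO3_DQ_BITS) | (dq & ((1u << PKO3_DQ_BITS) - 1));
}

int main(void)
{
	unsigned int handle = pko3_dq_handle(2, 37);

	/* Decode as cvmx_pko3_legacy_xmit() does: node above bit 10,
	 * queue number in the low 10 bits. */
	unsigned int node = handle >> PKO3_DQ_BITS;
	unsigned int dq = handle & ((1u << PKO3_DQ_BITS) - 1);

	printf("handle 0x%x -> node %u, dq %u\n", handle, node, dq);
	return 0;
}
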
/u-boot/drivers/ram/sifive/
sifive_ddr.c
155 u32 dq = 0; local
183 fails |= (1 << dq);
188 slicelsc += (dq % 10);
189 slicemsc += (dq / 10);
201 dq++;
/u-boot/drivers/ddr/marvell/a38x/
mv_ddr4_training_calibration.c
34 static u8 vdq_tv; /* vref value for dq vref calibration */
73 /* dq vref calibration flow */
100 DEBUG_CALIBRATION(DEBUG_LEVEL_INFO, ("Starting ddr4 dq vref calibration training stage\n"));
145 ("error: %s: ddr4 centralization failed (dq vref tap index %d)!!!\n",
684 * getting pbs value per dq and setting pbs_taps_per_dq array.
894 * getting pbs value per dq and setting pbs_taps_per_dq array.
1110 int dq_to_dqs_delta[MAX_BUS_NUM][BUS_WIDTH_IN_BITS]; /* skew b/w dq and dqs */
1113 u32 new_pbs_per_byte[MAX_BUS_NUM]; /* dq pads' pbs value correction */
1310 /* deskew dq */
1354 /* calc dq ske
2048 u32 dq, pad; local
[all...]
/u-boot/drivers/net/ldpaa_eth/
ldpaa_eth.c
188 const struct ldpaa_dq *dq; local
210 dq = qbman_swp_dqrr_next(swp);
211 } while (get_timer(time_start) < timeo && !dq);
213 if (dq) {
219 status = (uint8_t)ldpaa_dq_flags(dq);
224 qbman_swp_dqrr_consume(swp, dq);
228 fd = ldpaa_dq_fd(dq);
232 qbman_swp_dqrr_consume(swp, dq);
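
ldpaa_eth.c polls the software portal's dequeue response ring until an entry arrives or a timeout expires, then inspects and consumes it. Below is a standalone sketch of that poll-inspect-consume loop; the qbman_* names mirror the driver's helpers, but the stub bodies, the struct contents, and the countdown timer are invented here so the sketch compiles on its own.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the QBMan types/helpers; the real ones live in
 * drivers/net/fsl-mc/dpio/qbman_portal.c. */
struct ldpaa_dq { uint32_t flags; };
struct qbman_swp { int dummy; };

static const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *swp)
{
	static const struct ldpaa_dq entry = { .flags = 0x1 };
	static int polls;

	(void)swp;
	/* Pretend an entry shows up on the third poll. */
	return (++polls >= 3) ? &entry : NULL;
}

static uint32_t ldpaa_dq_flags(const struct ldpaa_dq *dq) { return dq->flags; }

static void qbman_swp_dqrr_consume(struct qbman_swp *swp,
				   const struct ldpaa_dq *dq)
{
	(void)swp; (void)dq;	/* real code writes QBMAN_CINH_SWP_DCAP */
}

int main(void)
{
	struct qbman_swp swp;
	const struct ldpaa_dq *dq;
	int budget = 1000;	/* stand-in for the driver's get_timer() timeout */

	do {
		dq = qbman_swp_dqrr_next(&swp);		/* peek next DQRR entry */
	} while (--budget > 0 && !dq);

	if (dq) {
		printf("dequeue flags: 0x%x\n", ldpaa_dq_flags(dq));
		qbman_swp_dqrr_consume(&swp, dq);	/* release the ring slot */
	} else {
		printf("timed out waiting for a dequeue entry\n");
	}
	return 0;
}
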
/u-boot/lib/crypto/
rsa_helper.c
142 key->dq = value;
/u-boot/drivers/ram/rockchip/
sdram_rv1126.c
1042 /* dq slew rate */
1131 /* MR11 for lp4 ca odt, dq odt set */
1501 * delta_sig: value for single signal: dq/dm
1716 /* set dq map for ddr4 */
1987 u8 dq; local
1997 for (dq = 0; dq < 8; dq++) {
1998 rd_result->dqs[dqs].dq_min[dq] =
1999 readb(PHY_REG(phy_base, phy_ofs + 0x15 + dq));
2016 u8 dq; local
[all...]

Completed in 323 milliseconds