Searched refs:ca (Results 76 - 100 of 175) sorted by relevance


/linux-master/fs/bcachefs/
journal_reclaim.c
78 journal_dev_space_available(struct journal *j, struct bch_dev *ca, argument
81 struct journal_device *ja = &ca->journal;
87 .next_entry = ca->mi.bucket_size,
88 .total = ca->mi.bucket_size * ja->nr,
107 if (unwritten > ca->mi.bucket_size)
117 sectors = ca->mi.bucket_size;
123 if (sectors < ca->mi.bucket_size && buckets) {
125 sectors = ca->mi.bucket_size;
130 .total = sectors + buckets * ca->mi.bucket_size,
144 for_each_member_device_rcu(c, ca,
[all...]
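The journal_reclaim.c matches above size the journal on one device from the member info: a single entry occupies at most one bucket, so next_entry starts at ca->mi.bucket_size, total capacity is bucket_size times the number of journal buckets, and unwritten data is clamped to one bucket. A loose userspace sketch of that arithmetic (names, types and the clamping rule are simplified stand-ins, not the real bcachefs accounting):

#include <stdint.h>

struct journal_space_sketch {
	uint64_t next_entry;	/* sectors available for the next journal entry */
	uint64_t total;		/* total journal sectors on the device */
};

static struct journal_space_sketch
journal_dev_space_sketch(uint64_t bucket_size, unsigned nr_journal_buckets,
			 uint64_t unwritten_sectors)
{
	/* An entry never spans buckets, so unwritten data counts at most one bucket. */
	if (unwritten_sectors > bucket_size)
		unwritten_sectors = bucket_size;

	return (struct journal_space_sketch) {
		.next_entry = bucket_size - unwritten_sectors,
		.total      = bucket_size * (uint64_t)nr_journal_buckets,
	};
}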
ec.c
101 struct bch_dev *ca; member in struct:ec_bio
167 struct bch_dev *ca,
244 ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
273 struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev); local
274 if (unlikely(!ca)) {
280 struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
286 __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
291 struct bucket *g = gc_bucket(ca, bucket.offset);
294 ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
297 bch2_dev_usage_update(c, ca,
166 __mark_stripe_bucket(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c_stripe s, unsigned ptr_idx, bool deleting, struct bpos bucket, struct bch_alloc_v4 *a, enum btree_iter_update_trigger_flags flags) argument
636 struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev); local
705 struct bch_dev *ca = ec_bio->ca; local
740 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw); local
1203 ec_stripe_update_extent(struct btree_trans *trans, struct bch_dev *ca, struct bpos bucket, u8 gen, struct ec_stripe_buf *s, struct bpos *bp_pos) argument
1308 struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); local
1358 struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE); local
1559 struct bch_dev *ca = ob_dev(c, ob); local
2096 __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca) argument
2129 bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca) argument
[all...]
error.c
51 struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work); local
52 struct bch_fs *c = ca->fs;
56 dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
59 ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
62 bch_err(ca,
68 void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type) argument
70 atomic64_inc(&ca->errors[type]);
71 //queue_work(system_long_wq, &ca->io_error_work);
extents.c
76 struct bch_dev *ca = bch2_dev_rcu(c, dev); local
77 return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
138 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); local
140 if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr)))
149 if (!p.idx && !ca)
155 if (!p.idx && !bch2_dev_is_readable(ca))
293 struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev); local
294 bool same_bucket = ca
673 __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p) argument
685 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); local
692 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); local
880 struct bch_dev *ca; local
996 struct bch_dev *ca; local
1012 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); local
1107 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); local
[all...]
journal_sb.c
176 int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca, argument
186 bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
187 bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal_v2);
195 j = bch2_sb_field_resize(&ca->disk_sb, journal_v2,
200 bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
journal.c
911 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, argument
914 struct bch_fs *c = ca->fs;
915 struct journal_device *ja = &ca->journal;
935 bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
941 ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
948 bch2_trans_mark_metadata_bucket(trans, ca,
950 ca->mi.bucket_size, BTREE_TRIGGER_transactional));
994 ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
1029 bch2_trans_mark_metadata_bucket(trans, ca,
1048 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, argument
1098 bch2_dev_journal_alloc(struct bch_dev *ca) argument
1163 bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca) argument
1279 bch2_dev_journal_exit(struct bch_dev *ca) argument
1294 bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb) argument
[all...]
io_write.c
39 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency, argument
43 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
54 if (atomic_read(&ca->congested) < CONGESTED_MAX)
57 &ca->congested);
59 ca->congested_last = now;
60 } else if (atomic_read(&ca->congested) > 0) {
61 atomic_dec(&ca->congested);
65 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) argument
67 atomic64_t *latency = &ca->cur_latency[rw];
89 bch2_congested_acct(ca, io_latenc
410 struct bch_dev *ca = nocow local
652 struct bch_dev *ca = wbio->have_ioref local
1268 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); local
1295 struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode); local
[all...]
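bch2_congested_acct() keeps a bounded per-device congestion score: slow IOs push it up (never past CONGESTED_MAX), fast IOs let it decay back toward zero. A rough userspace sketch of a saturating up/down counter like that; the cap and the latency comparison are stand-ins, and the check-then-add around the cap is not race-free (the sketch keeps the shape, not the exact kernel semantics):

#include <stdatomic.h>
#include <stdint.h>

#define CONGESTED_MAX_SKETCH 1024	/* arbitrary cap for the sketch */

struct dev_congestion {
	atomic_int score;
};

static void account_io_latency(struct dev_congestion *dc,
			       uint64_t latency_us, uint64_t expected_us)
{
	if (latency_us > expected_us) {
		/* Slow IO: bump the score, but not past the cap. */
		if (atomic_load(&dc->score) < CONGESTED_MAX_SKETCH)
			atomic_fetch_add(&dc->score, 1);
	} else if (atomic_load(&dc->score) > 0) {
		/* Fast IO: let the score decay. */
		atomic_fetch_sub(&dc->score, 1);
	}
}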
bcachefs.h
302 #define bch_err_dev(ca, fmt, ...) \
303 bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
304 #define bch_err_dev_offset(ca, _offset, fmt, ...) \
305 bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
313 #define bch_err_dev_ratelimited(ca, fmt, ...) \
314 bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
315 #define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
316 bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offse
1212 bucket_bytes(const struct bch_dev *ca) argument
[all...]
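The bch_err_dev* macros simply prepend a per-device prefix to the format string before handing it to the filesystem's print helper. A generic version of that wrapper-macro pattern, using fprintf and a hypothetical device struct (the ##__VA_ARGS__ comma-swallowing is a GNU extension, the same one the kernel macros rely on):

#include <stdio.h>

struct my_dev { const char *name; };

/* Prefix every message with the device name. */
#define dev_err_sketch(dev, fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", (dev)->name, ##__VA_ARGS__)

/* Usage:
 *	struct my_dev d = { .name = "sda1" };
 *	dev_err_sketch(&d, "checksum error at offset %llu", 4096ULL);
 */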
/linux-master/kernel/power/
snapshot.c
301 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask, argument
304 ca->chain = NULL;
305 ca->used_space = LINKED_PAGE_DATA_SIZE;
306 ca->gfp_mask = gfp_mask;
307 ca->safe_needed = safe_needed;
310 static void *chain_alloc(struct chain_allocator *ca, unsigned int size) argument
314 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
317 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
318 get_image_page(ca
442 alloc_rtree_node(gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca, struct list_head *list) argument
468 add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca) argument
538 create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca, unsigned long start, unsigned long end) argument
687 struct chain_allocator ca; local
2515 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) argument
2598 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) argument
2726 get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) argument
2782 static struct chain_allocator ca; local
[all...]
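The chain_allocator in snapshot.c is a bump allocator: chain_alloc() hands out sub-allocations from the current linked page and grabs a fresh page when the remaining space cannot satisfy a request (chain_init() sets used_space to the full page size to force that on the first call). A self-contained userspace sketch of the same idea; the page size and names are illustrative, and malloc stands in for the kernel page allocation:

#include <stdlib.h>

#define CHAIN_PAGE_DATA 4000	/* usable bytes per chained block (made up) */

struct chain_page {
	struct chain_page *next;
	char data[CHAIN_PAGE_DATA];
};

struct chain_alloc {
	struct chain_page *head;
	size_t used;		/* bytes used in the head page */
};

static void chain_init_sketch(struct chain_alloc *ca)
{
	ca->head = NULL;
	ca->used = CHAIN_PAGE_DATA;	/* force a new page on the first allocation */
}

static void *chain_alloc_sketch(struct chain_alloc *ca, size_t size)
{
	if (size > CHAIN_PAGE_DATA)
		return NULL;

	if (CHAIN_PAGE_DATA - ca->used < size) {
		struct chain_page *p = malloc(sizeof(*p));

		if (!p)
			return NULL;
		p->next = ca->head;	/* chain pages so they can all be freed later */
		ca->head = p;
		ca->used = 0;
	}

	void *ret = ca->head->data + ca->used;

	ca->used += size;
	return ret;
}

static void chain_free_all_sketch(struct chain_alloc *ca)
{
	while (ca->head) {
		struct chain_page *next = ca->head->next;

		free(ca->head);
		ca->head = next;
	}
	ca->used = CHAIN_PAGE_DATA;
}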
/linux-master/include/trace/events/
bcache.h
429 TP_PROTO(struct cache *ca, size_t bucket),
430 TP_ARGS(ca, bucket),
439 __entry->dev = ca->bdev->bd_dev;
440 __entry->offset = bucket << ca->set->bucket_bits;
441 __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
450 TP_PROTO(struct cache *ca, size_t bucket),
451 TP_ARGS(ca, bucket),
459 __entry->dev = ca->bdev->bd_dev;
460 __entry->offset = bucket << ca->set->bucket_bits;
468 TP_PROTO(struct cache *ca, unsigne
[all...]
/linux-master/drivers/media/pci/cx23885/
altera-ci.c
112 struct dvb_ca_en50221 ca; member in struct:altera_ci_state
504 if (state->ca.data != NULL)
505 dvb_ca_en50221_release(&state->ca);
748 state->ca.owner = THIS_MODULE;
749 state->ca.read_attribute_mem = altera_ci_read_attribute_mem;
750 state->ca.write_attribute_mem = altera_ci_write_attribute_mem;
751 state->ca.read_cam_control = altera_ci_read_cam_ctl;
752 state->ca.write_cam_control = altera_ci_write_cam_ctl;
753 state->ca.slot_reset = altera_ci_slot_reset;
754 state->ca
[all...]
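altera-ci.c embeds a struct dvb_ca_en50221 in its state and fills its callback pointers before registering it; the CA core then drives the hardware purely through that table, handing the driver's data pointer back to every callback. A stripped-down illustration of the callback-table pattern (this is not the real dvb_ca_en50221 layout, just the shape of it):

/* Hypothetical, cut-down callback table in the style of dvb_ca_en50221. */
struct ci_ops_sketch {
	void *data;	/* driver-private pointer handed back to every callback */
	int (*read_attribute_mem)(struct ci_ops_sketch *ca, int slot, int address);
	int (*write_attribute_mem)(struct ci_ops_sketch *ca, int slot, int address,
				   unsigned char value);
	int (*slot_reset)(struct ci_ops_sketch *ca, int slot);
};

struct my_ci_state {
	struct ci_ops_sketch ca;
	/* hardware handles would live here */
};

static int my_read_attr(struct ci_ops_sketch *ca, int slot, int address)
{
	struct my_ci_state *state = ca->data;	/* recover the driver state */

	(void)state; (void)slot; (void)address;
	return 0;	/* a real driver would read from the CAM here */
}

static void my_ci_init(struct my_ci_state *state)
{
	state->ca.data = state;
	state->ca.read_attribute_mem = my_read_attr;
	/* ... remaining callbacks filled the same way, then registered with the core */
}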
/linux-master/drivers/media/pci/ttpci/
budget-av.c
59 struct dvb_ca_en50221 ca; member in struct:budget_av
63 static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
123 static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) argument
125 struct budget_av *budget_av = ca->data;
136 ciintf_slot_shutdown(ca, slot);
142 static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) argument
144 struct budget_av *budget_av = ca->data;
155 ciintf_slot_shutdown(ca, slot);
161 static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) argument
163 struct budget_av *budget_av = ca
181 ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) argument
200 ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot) argument
229 ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) argument
245 ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) argument
260 ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) argument
[all...]
budget-ci.c
98 struct dvb_ca_en50221 ca; member in struct:budget_ci
253 static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) argument
255 struct budget_ci *budget_ci = ca->data;
264 static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) argument
266 struct budget_ci *budget_ci = ca->data;
275 static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) argument
277 struct budget_ci *budget_ci = ca->data;
286 static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) argument
288 struct budget_ci *budget_ci = ca->data;
297 static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, in argument
320 ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) argument
333 ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) argument
402 ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) argument
[all...]
/linux-master/net/ipv4/
tcp_vegas.c
294 const struct vegas *ca = inet_csk_ca(sk); local
297 info->vegas.tcpv_enabled = ca->doing_vegas_now;
298 info->vegas.tcpv_rttcnt = ca->cntRTT;
299 info->vegas.tcpv_rtt = ca->baseRTT;
300 info->vegas.tcpv_minrtt = ca->minRTT;
tcp_westwood.c
265 const struct westwood *ca = inet_csk_ca(sk); local
270 info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt);
271 info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
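In both congestion-control modules, inet_csk_ca(sk) returns a small per-socket scratch area that the module reinterprets as its own state struct; the get_info() hooks only copy fields out of it for diagnostics. A compact sketch of that "opaque private area plus cast accessor" pattern, with invented sizes and names rather than the real socket layout:

#include <string.h>

#define CA_PRIV_SIZE 64		/* fixed scratch area, in the spirit of icsk_ca_priv */

struct sock_sketch {
	/* ...protocol fields... */
	unsigned long ca_priv[CA_PRIV_SIZE / sizeof(unsigned long)];
};

/* Every congestion-control module casts the same scratch area to its own type. */
static inline void *csk_ca_sketch(struct sock_sketch *sk)
{
	return sk->ca_priv;
}

struct vegas_like_state {
	unsigned int base_rtt;
	unsigned int min_rtt;
	unsigned int rtt_cnt;
};

_Static_assert(sizeof(struct vegas_like_state) <= CA_PRIV_SIZE,
	       "private state must fit in the scratch area");

static void vegas_like_init(struct sock_sketch *sk)
{
	struct vegas_like_state *ca = csk_ca_sketch(sk);

	memset(ca, 0, sizeof(*ca));
	ca->base_rtt = ~0U;
}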
/linux-master/tools/testing/selftests/move_mount_set_group/
move_mount_set_group_test.c
311 struct child_args *ca = (struct child_args *)data; local
318 if (ca->shared) {
327 ca->unsfd = ret;
332 ca->mntnsfd = ret;
337 ca->mntfd = ret;
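The selftest passes a struct child_args through the opaque data pointer of the child function, and the child writes the file descriptors it set up back into it for the parent to use. The same in/out-argument pattern shown with pthreads as a small runnable illustration (names and the fake fd values are made up):

#include <pthread.h>
#include <stdio.h>

struct child_args_sketch {
	int shared;	/* in: configuration for the child */
	int result_fd;	/* out: filled in by the child */
};

static void *child_fn(void *data)
{
	struct child_args_sketch *ca = data;	/* recover the typed arguments */

	ca->result_fd = ca->shared ? 3 : 4;	/* stand-in for opening real fds */
	return NULL;
}

int main(void)
{
	struct child_args_sketch ca = { .shared = 1 };
	pthread_t t;

	if (pthread_create(&t, NULL, child_fn, &ca))
		return 1;
	pthread_join(t, NULL);
	printf("child reported fd %d\n", ca.result_fd);
	return 0;
}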
/linux-master/drivers/infiniband/ulp/ipoib/
ipoib_ethtool.c
66 ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
68 strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
197 ret = ib_query_port(priv->ca, priv->port, &attr);
ipoib_ib.c
96 ib_dma_unmap_single(priv->ca, mapping[0],
142 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
144 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
275 int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) argument
283 mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
285 if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
294 mapping[i + off] = ib_dma_map_page(ca,
299 if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
308 ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
312 ib_dma_unmap_single(ca, mappin
[all...]
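ipoib_dma_map_tx() maps the linear part of the skb and then each page fragment, and if any mapping fails it walks back and unmaps everything it had already mapped. A simplified, self-contained sketch of that map-then-unwind loop; the dma_map_buf_sketch/dma_unmap_buf_sketch helpers are hypothetical stand-ins for the ib_dma_* calls and the stub here never actually fails:

#include <stddef.h>
#include <stdint.h>

#define DMA_MAPPING_BAD 0	/* sketch convention: 0 means the mapping failed */

static uint64_t dma_map_buf_sketch(void *buf, size_t len)
{
	(void)len;
	return (uint64_t)(uintptr_t)buf;	/* stub: always "succeeds" */
}

static void dma_unmap_buf_sketch(uint64_t addr, size_t len)
{
	(void)addr; (void)len;
}

struct tx_buf_sketch {
	void *frag[8];
	size_t frag_len[8];
	int nr_frags;
	uint64_t mapping[8];
};

static int map_tx_sketch(struct tx_buf_sketch *tx)
{
	int i;

	for (i = 0; i < tx->nr_frags; i++) {
		tx->mapping[i] = dma_map_buf_sketch(tx->frag[i], tx->frag_len[i]);
		if (tx->mapping[i] == DMA_MAPPING_BAD)
			goto unwind;
	}
	return 0;

unwind:
	/* Undo only the mappings that actually succeeded. */
	while (--i >= 0)
		dma_unmap_buf_sketch(tx->mapping[i], tx->frag_len[i]);
	return -1;
}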
/linux-master/net/sched/
act_connmark.c
38 struct tcf_connmark_info *ca = to_connmark(a); local
44 tcf_lastuse_update(&ca->tcf_tm);
45 tcf_action_update_bstats(&ca->common, skb);
47 parms = rcu_dereference_bh(ca->parms);
89 tcf_action_inc_overlimit_qstats(&ca->common);
91 return READ_ONCE(ca->tcf_action);
/linux-master/drivers/sbus/char/
oradax.c
166 void *ca; /* Completion Address */ member in struct:dax_ccb
225 static int dax_ccb_info(u64 ca, struct ccb_info_result *info);
226 static int dax_ccb_kill(u64 ca, u16 *kill_res);
566 unsigned long ca; local
588 ca = ctx->ca_buf_ra + hdr.ca_offset;
598 ret = dax_ccb_kill(ca, &ctx->result.kill.action);
618 ret = dax_ccb_info(ca, &ctx->result.info);
707 static int dax_ccb_kill(u64 ca, u16 *kill_res) argument
714 dax_dbg("attempting kill on ca_ra 0x%llx", ca);
715 hv_ret = sun4v_ccb_kill(ca, kill_re
734 dax_ccb_info(u64 ca, struct ccb_info_result *info) argument
[all...]
/linux-master/drivers/net/ethernet/
korina.c
251 u32 ca; /* Current Address. */ member in struct:dma_desc
426 dma_addr_t ca; local
452 ca = dma_map_single(lp->dmadev, skb->data, length, DMA_TO_DEVICE);
453 if (dma_mapping_error(lp->dmadev, ca))
456 lp->tx_skb_dma[idx] = ca;
457 td->ca = ca;
602 dma_addr_t ca; local
643 ca = dma_map_single(lp->dmadev, skb_new->data, KORINA_RBSIZE,
645 if (dma_mapping_error(lp->dmadev, ca)) {
991 dma_addr_t ca; local
[all...]
/linux-master/drivers/media/pci/mantis/
mantis_cards.c
69 struct mantis_ca *ca; local
74 ca = mantis->mantis_ca;
99 wake_up(&ca->hif_write_wq);
100 schedule_work(&ca->hif_evm_work);
hopper_cards.c
60 struct mantis_ca *ca; local
65 ca = mantis->mantis_ca;
90 wake_up(&ca->hif_write_wq);
91 schedule_work(&ca->hif_evm_work);
/linux-master/drivers/dma/
milbeaut-hdmac.c
119 u32 cb, ca, src_addr, dest_addr, len; local
147 ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
149 ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
151 ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
153 ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
155 ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
156 writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
157 ca |= MLB_HDMAC_EB;
158 writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
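The prep routine above assembles the DMACA register value from bitfields (request source, burst type, transfer count), writes it once to program the channel, then ORs in the enable bit and writes it again to start the transfer. A userspace sketch of building a register value from field masks; FIELD_PREP_SKETCH mimics the kernel's FIELD_PREP, but the field layout below is invented, not the real MLB_HDMAC bit assignments:

#include <stdint.h>
#include <stdio.h>

/* Simplified FIELD_PREP: shift the value up to the mask's lowest set bit. */
#define FIELD_PREP_SKETCH(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DMACA_IS	0x0000001fU	/* made-up field: input/request source */
#define DMACA_TC	0x000fff00U	/* made-up field: transfer count */
#define DMACA_BT	0x00f00000U	/* made-up field: burst type */
#define DMACA_EB	0x80000000U	/* made-up bit: channel enable */

static void write_reg_sketch(uint32_t val)
{
	printf("DMACA <- 0x%08x\n", val);	/* stands in for writel_relaxed() */
}

int main(void)
{
	uint32_t ca = 0;

	ca |= FIELD_PREP_SKETCH(DMACA_IS, 7);	/* slave/request id */
	ca |= FIELD_PREP_SKETCH(DMACA_BT, 0xd);	/* burst type */
	ca |= FIELD_PREP_SKETCH(DMACA_TC, 31);	/* beats per transfer - 1 */

	write_reg_sketch(ca);			/* program the channel */
	write_reg_sketch(ca | DMACA_EB);	/* then kick it off */
	return 0;
}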
/linux-master/drivers/clk/keystone/
sci-clk.c
357 const struct sci_clk *ca = a; local
360 if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
362 if (ca->dev_id > cb->dev_id ||
363 (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
502 struct sci_clk *ca = container_of(a, struct sci_clk, node); local
505 return _cmp_sci_clk(ca, &cb);
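_cmp_sci_clk() orders clocks by (dev_id, clk_id) so the provider's clock list can be sorted once and then binary-searched. The same two-key comparator written for qsort()/bsearch(), with a minimal stand-in struct:

#include <stdio.h>
#include <stdlib.h>

struct sci_clk_sketch {
	unsigned int dev_id;
	unsigned int clk_id;
};

/* Primary key dev_id, secondary key clk_id; usable by both qsort() and bsearch(). */
static int cmp_sci_clk_sketch(const void *a, const void *b)
{
	const struct sci_clk_sketch *ca = a, *cb = b;

	if (ca->dev_id != cb->dev_id)
		return ca->dev_id < cb->dev_id ? -1 : 1;
	if (ca->clk_id != cb->clk_id)
		return ca->clk_id < cb->clk_id ? -1 : 1;
	return 0;
}

int main(void)
{
	struct sci_clk_sketch clks[] = {
		{ 2, 1 }, { 1, 3 }, { 1, 0 }, { 2, 0 },
	};
	struct sci_clk_sketch key = { 1, 3 };

	qsort(clks, 4, sizeof(clks[0]), cmp_sci_clk_sketch);
	struct sci_clk_sketch *hit =
		bsearch(&key, clks, 4, sizeof(clks[0]), cmp_sci_clk_sketch);

	printf("found: %s\n", hit ? "yes" : "no");
	return 0;
}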

