Searched refs:wb (Results 1 - 25 of 167) sorted by relevance


/linux-master/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/
ia_css_wb.host.h
32 const struct sh_css_isp_wb_params *wb,
37 const struct ia_css_wb_config *wb,
ia_css_wb.host.c
59 const struct sh_css_isp_wb_params *wb,
62 if (!wb) return;
65 "wb_gain_shift", wb->gain_shift);
67 "wb_gain_gr", wb->gain_gr);
69 "wb_gain_r", wb->gain_r);
71 "wb_gain_b", wb->gain_b);
73 "wb_gain_gb", wb->gain_gb);
58 ia_css_wb_dump( const struct sh_css_isp_wb_params *wb, unsigned int level) argument
/linux-master/include/linux/
backing-dev-defs.h
84 * Each wb (bdi_writeback) can perform writeback operations, is measured
86 * (bdi_writeback) is served by its embedded bdi->wb.
90 * memcg - blkcg combination can be served by its own wb by assigning a
91 * dedicated wb to each memcg, which enables isolation across different
95 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
98 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
100 * that a new wb for the combination can be created.
147 struct percpu_ref refcnt; /* used only for !root wb's */
186 struct bdi_writeback wb; /* the root writeback info for this bdi */ member in struct:backing_dev_info
190 struct mutex cgwb_release_mutex; /* protect shutdown of wb struct
217 wb_tryget(struct bdi_writeback *wb) argument
228 wb_get(struct bdi_writeback *wb) argument
239 wb_put_many(struct bdi_writeback *wb, unsigned long nr) argument
257 wb_put(struct bdi_writeback *wb) argument
268 wb_dying(struct bdi_writeback *wb) argument
275 wb_tryget(struct bdi_writeback *wb) argument
280 wb_get(struct bdi_writeback *wb) argument
284 wb_put(struct bdi_writeback *wb) argument
288 wb_put_many(struct bdi_writeback *wb, unsigned long nr) argument
292 wb_dying(struct bdi_writeback *wb) argument
[all...]
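
Note: the backing-dev-defs.h matches above are the lifetime helpers (wb_tryget/wb_get/wb_put/wb_dying) that pin a cgroup bdi_writeback while it is in use; per the comment at line 147 the non-root wb's sit on a percpu_ref. A minimal user-space analogue of the same get/tryget/put pattern, using a plain atomic counter instead of percpu_ref and entirely hypothetical names, might look like this:

/* Simplified analogue of the wb_get()/wb_tryget()/wb_put() pattern:
 * new references may only be taken while the object is still live.
 * Plain-atomic sketch, not the kernel's percpu_ref. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct writeback_ctx {            /* hypothetical stand-in for bdi_writeback */
    atomic_long refcnt;           /* 0 means "dead, do not resurrect" */
};

static bool ctx_tryget(struct writeback_ctx *ctx)
{
    long old = atomic_load(&ctx->refcnt);

    /* Only succeed while at least one reference is still held. */
    while (old > 0) {
        if (atomic_compare_exchange_weak(&ctx->refcnt, &old, old + 1))
            return true;
    }
    return false;
}

static void ctx_get(struct writeback_ctx *ctx)
{
    /* Caller must already hold a reference, so a plain increment is safe. */
    atomic_fetch_add(&ctx->refcnt, 1);
}

static void ctx_put(struct writeback_ctx *ctx)
{
    if (atomic_fetch_sub(&ctx->refcnt, 1) == 1)
        printf("last reference dropped, release the ctx\n");
}

int main(void)
{
    struct writeback_ctx ctx = { .refcnt = 1 };   /* initial reference */

    ctx_get(&ctx);                    /* holder takes an extra reference */
    ctx_put(&ctx);
    if (ctx_tryget(&ctx))             /* succeeds: object is still live */
        ctx_put(&ctx);
    ctx_put(&ctx);                    /* drops the initial reference */
    return ctx_tryget(&ctx) ? 1 : 0;  /* now fails: refcnt reached 0 */
}
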
backing-dev.h
39 void wb_start_background_writeback(struct bdi_writeback *wb);
49 static inline bool wb_has_dirty_io(struct bdi_writeback *wb) argument
51 return test_bit(WB_has_dirty_io, &wb->state);
63 static inline void wb_stat_mod(struct bdi_writeback *wb, argument
66 percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
69 static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) argument
71 wb_stat_mod(wb, item, 1);
74 static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) argument
76 wb_stat_mod(wb, item, -1);
79 static inline s64 wb_stat(struct bdi_writeback *wb, enu argument
84 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) argument
139 writeback_in_progress(struct bdi_writeback *wb) argument
193 struct bdi_writeback *wb; local
222 struct bdi_writeback *wb; local
[all...]
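
Note: the backing-dev.h matches show the per-wb statistics helpers; wb_stat_mod() folds updates into per-CPU counters with a batch size (WB_STAT_BATCH) so that hot-path increments rarely touch shared state. A rough user-space sketch of that batching idea, with hypothetical names and a per-thread delta standing in for per-CPU counters:

/* Batched counter sketch: accumulate updates thread-locally and fold them
 * into the shared total only when the local delta exceeds BATCH.
 * Loosely modelled on percpu_counter_add_batch(); names are hypothetical. */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 32

static atomic_long total;                 /* shared, approximate until flushed */
static _Thread_local long local_delta;    /* cheap per-thread accumulator */

static void stat_mod(long amount)
{
    local_delta += amount;
    if (local_delta >= BATCH || local_delta <= -BATCH) {
        atomic_fetch_add(&total, local_delta);   /* fold into shared counter */
        local_delta = 0;
    }
}

static long stat_read_approx(void)
{
    /* Fast read: may be off by up to BATCH-1 per thread, like wb_stat(). */
    return atomic_load(&total);
}

int main(void)
{
    for (int i = 0; i < 1000; i++)
        stat_mod(1);
    printf("approx=%ld exact=%ld\n", stat_read_approx(),
           stat_read_approx() + local_delta);
    return 0;
}
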
writeback.h
88 struct bdi_writeback *wb; /* wb this writeback is issued under */ member in struct:writeback_control
92 int wb_id; /* current wb id */
93 int wb_lcand_id; /* last foreign candidate wb id */
94 int wb_tcand_id; /* this foreign candidate wb id */
95 size_t wb_bytes; /* bytes written by current wb */
115 ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
121 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
123 * domain, global_wb_domain, that every wb i
[all...]
/linux-master/fs/bcachefs/
btree_write_buffer.h
9 struct btree_write_buffer *wb = &c->btree_write_buffer; local
11 return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;
16 struct btree_write_buffer *wb = &c->btree_write_buffer; local
18 return wb->inc.keys.nr > wb->inc.keys.size * 3 / 4;
27 struct btree_write_buffer_keys *wb; member in struct:journal_keys_to_wb
45 struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
49 dst->wb
[all...]
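
Note: the btree_write_buffer.h matches are watermark checks: one test fires once the incoming plus in-flight keys exceed a quarter of the buffer, the other once the incoming side passes three quarters. A tiny standalone sketch of that two-level watermark test (hypothetical names, thresholds copied from the snippet):

/* Two-level watermark check, as suggested by the snippet above:
 * "should flush" at 1/4 occupancy, "must flush" at 3/4. Names hypothetical. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct key_buf {
    size_t nr;      /* keys currently queued */
    size_t size;    /* capacity */
};

static bool wb_should_flush(const struct key_buf *inc,
                            const struct key_buf *flushing)
{
    return inc->nr + flushing->nr > inc->size / 4;
}

static bool wb_must_flush(const struct key_buf *inc)
{
    return inc->nr > inc->size * 3 / 4;
}

int main(void)
{
    struct key_buf inc = { .nr = 700, .size = 1024 };
    struct key_buf flushing = { .nr = 0, .size = 1024 };

    printf("should=%d must=%d\n", wb_should_flush(&inc, &flushing),
           wb_must_flush(&inc));
    return 0;
}
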
btree_write_buffer.c
116 struct btree_write_buffered_key *wb)
122 trans->journal_res.seq = wb->journal_seq;
124 return bch2_trans_update(trans, iter, &wb->k,
134 struct btree_write_buffered_key *wb,
140 EBUG_ON(!wb->journal_seq);
142 EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
166 if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
168 return wb_flush_one_slowpath(trans, iter, wb);
171 bch2_btree_insert_key_leaf(trans, path, &wb->k, wb
114 wb_flush_one_slowpath(struct btree_trans *trans, struct btree_iter *iter, struct btree_write_buffered_key *wb) argument
133 wb_flush_one(struct btree_trans *trans, struct btree_iter *iter, struct btree_write_buffered_key *wb, bool *write_locked, size_t *fast) argument
187 btree_write_buffered_insert(struct btree_trans *trans, struct btree_write_buffered_key *wb) argument
205 move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) argument
258 struct btree_write_buffer *wb = &c->btree_write_buffer; local
429 struct btree_write_buffer *wb = &c->btree_write_buffer; local
472 struct btree_write_buffer *wb = &c->btree_write_buffer; local
498 struct btree_write_buffer *wb = &c->btree_write_buffer; local
514 struct btree_write_buffer *wb = &c->btree_write_buffer; local
550 struct btree_write_buffer *wb = &c->btree_write_buffer; local
583 struct btree_write_buffer *wb = &c->btree_write_buffer; local
623 wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size) argument
638 struct btree_write_buffer *wb = &c->btree_write_buffer; local
646 struct btree_write_buffer *wb = &c->btree_write_buffer; local
658 struct btree_write_buffer *wb = &c->btree_write_buffer; local
[all...]
/linux-master/mm/
backing-dev.c
52 struct bdi_writeback *wb = &bdi->wb; local
60 spin_lock(&wb->list_lock);
61 list_for_each_entry(inode, &wb->b_dirty, i_io_list)
63 list_for_each_entry(inode, &wb->b_io, i_io_list)
65 list_for_each_entry(inode, &wb->b_more_io, i_io_list)
67 list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
70 spin_unlock(&wb->list_lock);
73 wb_thresh = wb_calc_thresh(wb, dirty_thresh);
90 (unsigned long) K(wb_stat(wb, WB_WRITEBAC
377 struct bdi_writeback *wb = container_of(to_delayed_work(work), local
388 wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, gfp_t gfp) argument
439 wb_shutdown(struct bdi_writeback *wb) argument
461 wb_exit(struct bdi_writeback *wb) argument
490 struct bdi_writeback *wb = container_of(rcu_head, local
499 struct bdi_writeback *wb = container_of(work, struct bdi_writeback, local
527 struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback, local
532 cgwb_kill(struct bdi_writeback *wb) argument
543 cgwb_remove_from_bdi_list(struct bdi_writeback *wb) argument
556 struct bdi_writeback *wb; local
671 struct bdi_writeback *wb; local
705 struct bdi_writeback *wb; local
736 struct bdi_writeback *wb; local
767 struct bdi_writeback *wb; local
815 struct bdi_writeback *wb, *next; local
834 struct bdi_writeback *wb, *next; local
880 cgwb_remove_from_bdi_list(struct bdi_writeback *wb) argument
[all...]
page-writeback.c
129 struct bdi_writeback *wb; member in struct:dirty_throttle_control
137 unsigned long wb_dirty; /* per-wb counterparts */
153 #define GDTC_INIT(__wb) .wb = (__wb), \
159 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
179 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) argument
181 return &wb->memcg_completions;
184 static void wb_min_max_ratio(struct bdi_writeback *wb, argument
187 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
189 unsigned long long min = wb
233 wb_memcg_completions(struct bdi_writeback *wb) argument
238 wb_min_max_ratio(struct bdi_writeback *wb, unsigned long *minp, unsigned long *maxp) argument
579 __wb_writeout_add(struct bdi_writeback *wb, long nr) argument
593 wb_writeout_inc(struct bdi_writeback *wb) argument
888 wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) argument
1003 struct bdi_writeback *wb = dtc->wb; local
1180 wb_update_write_bandwidth(struct bdi_writeback *wb, unsigned long elapsed, unsigned long written) argument
1288 struct bdi_writeback *wb = dtc->wb; local
1446 struct bdi_writeback *wb = gdtc->wb; local
1485 wb_update_bandwidth(struct bdi_writeback *wb) argument
1495 wb_bandwidth_estimate_start(struct bdi_writeback *wb) argument
1527 wb_max_pause(struct bdi_writeback *wb, unsigned long wb_dirty) argument
1546 wb_min_pause(struct bdi_writeback *wb, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause) argument
1623 struct bdi_writeback *wb = dtc->wb; local
1669 balance_dirty_pages(struct bdi_writeback *wb, unsigned long pages_dirtied, unsigned int flags) argument
1997 struct bdi_writeback *wb = NULL; local
2077 wb_over_bg_thresh(struct bdi_writeback *wb) argument
2604 struct bdi_writeback *wb; local
2668 struct bdi_writeback *wb; local
2692 folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) argument
2791 struct bdi_writeback *wb; local
2882 struct bdi_writeback *wb; local
2922 struct bdi_writeback *wb; local
2975 wb_inode_writeback_start(struct bdi_writeback *wb) argument
2980 wb_inode_writeback_end(struct bdi_writeback *wb) argument
3014 struct bdi_writeback *wb = inode_to_wb(inode); local
3063 struct bdi_writeback *wb = inode_to_wb(inode); local
[all...]
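
Note: in the page-writeback.c matches, wb_min_max_ratio() appears to scale the bdi-wide min/max dirty ratios by this wb's share of the device's total measured write bandwidth (this_bw / tot_bw), so a faster writer gets a proportionally larger slice. A rough standalone sketch of that proportional split, with hypothetical names and without the kernel's fixed-point and clamping details:

/* Proportional split sketch: scale a bdi-wide ratio by this wb's share of
 * the total measured write bandwidth. Hypothetical names. */
#include <stdio.h>

static unsigned long scale_by_bandwidth_share(unsigned long bdi_ratio,
                                              unsigned long this_bw,
                                              unsigned long tot_bw)
{
    if (tot_bw == 0)
        return bdi_ratio;          /* no data yet: fall back to the full ratio */
    return (unsigned long)(((unsigned long long)bdi_ratio * this_bw) / tot_bw);
}

int main(void)
{
    /* A wb doing 30 MB/s on a device totalling 120 MB/s gets 1/4 of the ratio. */
    printf("min share = %lu%%\n", scale_by_bandwidth_share(20, 30, 120));
    return 0;
}
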
/linux-master/fs/
fs-writeback.c
85 static bool wb_io_lists_populated(struct bdi_writeback *wb) argument
87 if (wb_has_dirty_io(wb)) {
90 set_bit(WB_has_dirty_io, &wb->state);
91 WARN_ON_ONCE(!wb->avg_write_bandwidth);
92 atomic_long_add(wb->avg_write_bandwidth,
93 &wb->bdi->tot_write_bandwidth);
98 static void wb_io_lists_depopulated(struct bdi_writeback *wb) argument
100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
101 list_empty(&wb
118 inode_io_list_move_locked(struct inode *inode, struct bdi_writeback *wb, struct list_head *head) argument
136 wb_wakeup(struct bdi_writeback *wb) argument
158 wb_wakeup_delayed(struct bdi_writeback *wb) argument
169 finish_writeback_work(struct bdi_writeback *wb, struct wb_writeback_work *work) argument
185 wb_queue_work(struct bdi_writeback *wb, struct wb_writeback_work *work) argument
268 struct bdi_writeback *wb = NULL; local
304 inode_cgwb_move_to_attached(struct inode *inode, struct bdi_writeback *wb) argument
333 struct bdi_writeback *wb = inode_to_wb(inode); variable in typeref:struct:bdi_writeback
341 wb_get(wb); variable
347 wb_put(wb); /* @inode already has ref */ variable
352 wb_put(wb); variable
667 cleanup_offline_cgwb(struct bdi_writeback *wb) argument
807 struct bdi_writeback *wb = wbc->wb; local
952 wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) argument
987 struct bdi_writeback *wb = list_entry(&bdi->wb_list, local
1068 struct bdi_writeback *wb; local
1176 inode_cgwb_move_to_attached(struct inode *inode, struct bdi_writeback *wb) argument
1193 struct bdi_writeback *wb = inode_to_wb(inode); variable in typeref:struct:bdi_writeback
1203 struct bdi_writeback *wb = inode_to_wb(inode); variable in typeref:struct:bdi_writeback
1209 wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) argument
1238 wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason) argument
1269 wb_start_background_writeback(struct bdi_writeback *wb) argument
1284 struct bdi_writeback *wb; local
1343 redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) argument
1368 redirty_tail(struct inode *inode, struct bdi_writeback *wb) argument
1378 requeue_io(struct inode *inode, struct bdi_writeback *wb) argument
1474 queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, unsigned long dirtied_before) argument
1563 requeue_inode(struct inode *inode, struct bdi_writeback *wb, struct writeback_control *wbc) argument
1736 struct bdi_writeback *wb; local
1808 writeback_chunk_size(struct bdi_writeback *wb, struct wb_writeback_work *work) argument
1848 writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, struct wb_writeback_work *work) argument
1993 __writeback_inodes_wb(struct bdi_writeback *wb, struct wb_writeback_work *work) argument
2027 writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, enum wb_reason reason) argument
2064 wb_writeback(struct bdi_writeback *wb, struct wb_writeback_work *work) argument
2163 get_next_work_item(struct bdi_writeback *wb) argument
2177 wb_check_background_flush(struct bdi_writeback *wb) argument
2195 wb_check_old_data_flush(struct bdi_writeback *wb) argument
2229 wb_check_start_all(struct bdi_writeback *wb) argument
2256 wb_do_writeback(struct bdi_writeback *wb) argument
2289 struct bdi_writeback *wb = container_of(to_delayed_work(work), local
2331 struct bdi_writeback *wb; local
2390 struct bdi_writeback *wb; local
2449 struct bdi_writeback *wb = NULL; local
[all...]
/linux-master/drivers/gpu/drm/msm/disp/dpu1/
dpu_hw_wb.h
26 * struct dpu_hw_wb_ops : Interface to the wb hw driver functions
37 struct dpu_hw_wb_cfg *wb);
40 struct dpu_hw_wb_cfg *wb);
43 struct dpu_hw_wb_cfg *wb);
69 /* wb path */
dpu_hw_wb.c
126 static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb) argument
131 image_size = (wb->dest.height << 16) | wb->dest.width;
133 out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
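
Note: dpu_hw_wb_roi() packs the destination and ROI sizes as (height << 16) | width before programming the writeback registers, a common pattern for display hardware that keeps two 16-bit dimensions in one 32-bit register. A standalone sketch of the pack/unpack pair (hypothetical names):

/* Pack two 16-bit dimensions into one 32-bit register value, height in the
 * upper half and width in the lower half, as in the dpu_hw_wb_roi() snippet. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_size(uint16_t width, uint16_t height)
{
    return ((uint32_t)height << 16) | width;
}

static void unpack_size(uint32_t reg, uint16_t *width, uint16_t *height)
{
    *width  = reg & 0xffff;
    *height = reg >> 16;
}

int main(void)
{
    uint16_t w, h;
    uint32_t reg = pack_size(1920, 1080);   /* 0x04380780 */

    unpack_size(reg, &w, &h);
    printf("reg=0x%08x -> %ux%u\n", (unsigned)reg, (unsigned)w, (unsigned)h);
    return 0;
}
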
/linux-master/certs/
extract-cert.c
79 static BIO *wb; variable
87 if (!wb) {
88 wb = BIO_new_file(cert_dst, "wb");
89 ERR(!wb, "%s", cert_dst);
92 ERR(!i2d_X509_bio(wb, x509), "%s", cert_dst);
120 FILE *f = fopen(cert_dst, "wb");
156 if (wb && !x509) {
169 BIO_free(wb);
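
Note: extract-cert.c writes certificates out in DER form through an OpenSSL BIO opened with mode "wb" and i2d_X509_bio(). A self-contained sketch of the same write path, here converting a PEM certificate to DER; the file names are placeholders, and the program builds with -lcrypto:

/* Write an X509 certificate to a file in DER form through a BIO, as in the
 * extract-cert.c snippet. Reads a PEM certificate first so the example is
 * complete; cert.pem / cert.der are placeholder names. */
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include <stdio.h>

int main(void)
{
    BIO *rb = BIO_new_file("cert.pem", "rb");
    if (!rb) {
        fprintf(stderr, "cannot open cert.pem\n");
        return 1;
    }

    X509 *x509 = PEM_read_bio_X509(rb, NULL, NULL, NULL);
    BIO_free(rb);
    if (!x509) {
        fprintf(stderr, "cannot parse certificate\n");
        return 1;
    }

    BIO *wb = BIO_new_file("cert.der", "wb");   /* binary write mode */
    int ok = wb && i2d_X509_bio(wb, x509);      /* DER-encode into the BIO */
    if (wb)
        BIO_free(wb);
    X509_free(x509);

    if (!ok) {
        fprintf(stderr, "cannot write cert.der\n");
        return 1;
    }
    return 0;
}
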
/linux-master/drivers/gpu/drm/radeon/
r600_dma.c
55 if (rdev->wb.enabled)
56 rptr = rdev->wb.wb[ring->rptr_offs/4];
141 /* set the wb address whether it's enabled or not */
143 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
145 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
147 if (rdev->wb.enabled)
243 gpu_addr = rdev->wb.gpu_addr + index;
246 rdev->wb.wb[inde
[all...]
radeon_device.c
424 rdev->wb.enabled = false;
438 if (rdev->wb.wb_obj) {
439 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
440 radeon_bo_kunmap(rdev->wb.wb_obj);
441 radeon_bo_unpin(rdev->wb.wb_obj);
442 radeon_bo_unreserve(rdev->wb.wb_obj);
444 radeon_bo_unref(&rdev->wb.wb_obj);
445 rdev->wb.wb = NULL;
446 rdev->wb
[all...]
ni_dma.c
57 if (rdev->wb.enabled) {
58 rptr = rdev->wb.wb[ring->rptr_offs/4];
127 if (rdev->wb.enabled) {
220 /* set the wb address whether it's enabled or not */
222 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
224 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
226 if (rdev->wb.enabled)
/linux-master/drivers/net/ethernet/intel/ice/
ice_txrx_lib.h
81 if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
113 if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
114 return le16_to_cpu(rx_desc->wb.l2tag1);
117 if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
118 return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
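
Note: the ice_txrx_lib.h matches read fields out of the descriptor's writeback ("wb") layout: status bits are tested against pre-swapped constants and 16-bit little-endian fields are converted with le16_to_cpu() before use. A user-space sketch of the same pattern using le16toh()/htole16() from <endian.h>; the descriptor layout, bit positions and names here are hypothetical:

/* Reading a little-endian writeback descriptor: test a status bit in place,
 * then convert multi-byte fields to host order before using them. */
#define _DEFAULT_SOURCE                  /* for htole16()/le16toh() on glibc */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_STAT_EOF (1u << 1)          /* hypothetical "end of frame" bit */

struct rx_desc_wb {                      /* layout the device writes back */
    uint16_t status_error0;              /* little-endian on the wire */
    uint16_t l2tag1;                     /* little-endian VLAN tag */
};

static bool desc_test_bit(uint16_t le_status, uint16_t bits)
{
    /* Swap the constant once instead of the field on every check,
     * as the kernel helpers do. */
    return (le_status & htole16(bits)) != 0;
}

int main(void)
{
    uint8_t raw[4] = { 0x02, 0x00, 0x64, 0x00 };   /* as written by the device */
    struct rx_desc_wb desc;

    memcpy(&desc, raw, sizeof(desc));
    if (desc_test_bit(desc.status_error0, DESC_STAT_EOF))
        printf("EOF set, vlan tag = %u\n", (unsigned)le16toh(desc.l2tag1));
    return 0;
}
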
/linux-master/include/trace/events/
writeback.h
151 static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) argument
153 return cgroup_ino(wb->memcg_css->cgroup);
158 if (wbc->wb)
159 return __trace_wb_assign_cgroup(wbc->wb);
165 static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) argument
239 TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
241 TP_ARGS(folio, wb),
256 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
257 __entry->bdi_id = wb->bdi->id;
259 __entry->memcg_id = wb
[all...]
/linux-master/security/apparmor/
match.c
627 #define inc_wb_pos(wb) \
629 wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
630 wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
634 static bool is_loop(struct match_workbuf *wb, aa_state_t state, argument
637 aa_state_t pos = wb->pos;
640 if (wb->history[pos] < state)
643 for (i = 0; i <= wb->len; i++) {
644 if (wb
657 leftmatch_fb(struct aa_dfa *dfa, aa_state_t start, const char *str, struct match_workbuf *wb, unsigned int *count) argument
[all...]
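
Note: the apparmor match.c hits show a fixed-size match history used for loop detection: inc_wb_pos() advances position and length with a power-of-two mask (WB_HISTORY_SIZE - 1), and is_loop() walks the recorded states looking for a repeat. A standalone sketch of that masked ring-buffer history; names are hypothetical, the exact apparmor comparison logic differs, and HISTORY_SIZE must stay a power of two:

/* Masked ring-buffer history for loop detection: positions wrap with
 * & (SIZE - 1), so SIZE must be a power of two. */
#include <stdbool.h>
#include <stdio.h>

#define HISTORY_SIZE 8                     /* power of two, like WB_HISTORY_SIZE */

struct workbuf {
    unsigned int pos;                      /* next slot to write */
    unsigned int len;                      /* valid entries, capped at SIZE */
    unsigned int history[HISTORY_SIZE];
};

static void record_state(struct workbuf *wb, unsigned int state)
{
    wb->history[wb->pos] = state;
    wb->pos = (wb->pos + 1) & (HISTORY_SIZE - 1);
    if (wb->len < HISTORY_SIZE)
        wb->len++;
}

static bool is_loop(const struct workbuf *wb, unsigned int state)
{
    unsigned int pos = wb->pos;

    /* Walk backwards through the recorded states looking for a repeat. */
    for (unsigned int i = 0; i < wb->len; i++) {
        pos = (pos - 1) & (HISTORY_SIZE - 1);
        if (wb->history[pos] == state)
            return true;
    }
    return false;
}

int main(void)
{
    struct workbuf wb = { 0 };

    record_state(&wb, 3);
    record_state(&wb, 7);
    printf("seen 7: %d, seen 9: %d\n", is_loop(&wb, 7), is_loop(&wb, 9));
    return 0;
}
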
/linux-master/drivers/net/ethernet/intel/igc/
igc_base.h
24 } wb; member in union:igc_adv_tx_desc
85 } wb; /* writeback */ member in union:igc_adv_rx_desc
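
Note: the igc_base.h matches show the usual Intel NIC descriptor pattern: a union where the driver programs the "read" layout with buffer addresses and, on completion, the hardware overwrites the same memory with the "wb" (writeback) layout carrying status. A standalone sketch of that union-of-layouts idea; the fields and widths here are hypothetical, not the real igc descriptor:

/* One descriptor, two views: the "read" layout the driver fills and the
 * "wb" layout the hardware writes back over it on completion. */
#include <stdint.h>
#include <stdio.h>

union adv_rx_desc {
    struct {
        uint64_t pkt_addr;        /* where the NIC should DMA the packet */
        uint64_t hdr_addr;        /* optional split-header buffer */
    } read;
    struct {
        uint32_t rss_hash;        /* filled in by hardware */
        uint32_t status_error;    /* completion status bits */
        uint16_t length;          /* received byte count */
        uint16_t vlan;            /* stripped VLAN tag */
        uint32_t reserved;
    } wb;                         /* same 16 bytes, reinterpreted */
};

int main(void)
{
    union adv_rx_desc desc = { .read = { .pkt_addr = 0x1000, .hdr_addr = 0 } };

    /* Pretend the hardware completed the descriptor and wrote status back. */
    desc.wb.status_error = 0x1;
    desc.wb.length = 1514;
    printf("sizeof=%zu done=%u len=%u\n", sizeof(desc),
           (unsigned)(desc.wb.status_error & 1), (unsigned)desc.wb.length);
    return 0;
}
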
/linux-master/drivers/net/wireless/microchip/wilc1000/
spi.c
383 static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) argument
392 .tx_buf = wb,
505 u8 wb[32], rb[32]; local
512 memset(wb, 0x0, sizeof(wb));
514 c = (struct wilc_spi_cmd *)wb;
535 c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
540 if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
543 cmd_len, resp_len, ARRAY_SIZE(wb));
547 if (wilc_spi_tx_rx(wilc, wb, r
600 u8 wb[32], rb[32]; local
675 u8 wb[32], rb[32]; local
801 u8 wb[32], rb[32]; local
[all...]
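
Note: wilc_spi_tx_rx() runs a full-duplex SPI transfer with the command bytes in wb as tx_buf and the response collected in rb as rx_buf. The same write-buffer/read-buffer pairing is available from user space through the spidev interface; a hedged sketch follows, where the device node, command bytes and lengths are placeholders:

/* Full-duplex SPI transfer via the spidev user-space interface: one transfer
 * with both tx_buf (command, "wb") and rx_buf (response, "rb"). */
#include <fcntl.h>
#include <linux/spi/spidev.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    uint8_t wb[8] = { 0xc4, 0x00, 0x00, 0x10 };   /* placeholder command bytes */
    uint8_t rb[8] = { 0 };                        /* response clocked in */

    int fd = open("/dev/spidev0.0", O_RDWR);      /* placeholder device node */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct spi_ioc_transfer xfer;
    memset(&xfer, 0, sizeof(xfer));
    xfer.tx_buf = (unsigned long)wb;              /* bytes shifted out */
    xfer.rx_buf = (unsigned long)rb;              /* bytes shifted in, same clock */
    xfer.len = sizeof(wb);
    xfer.speed_hz = 1000000;

    if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0) {
        perror("SPI_IOC_MESSAGE");
        close(fd);
        return 1;
    }

    printf("first response byte: 0x%02x\n", rb[0]);
    close(fd);
    return 0;
}
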
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ih.c
97 ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
98 ih->wptr_cpu = &adev->wb.wb[wptr_offs];
99 ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
100 ih->rptr_cpu = &adev->wb.wb[rptr_offs];
/linux-master/drivers/usb/class/
cdc-acm.c
157 usb_poison_urb(acm->wb[i].urb);
169 usb_unpoison_urb(acm->wb[i].urb);
182 struct acm_wb *wb; local
187 wb = &acm->wb[wbn];
188 if (!wb->use) {
189 wb->use = true;
190 wb->len = 0;
207 if(acm->wb[i].use)
216 static void acm_write_done(struct acm *acm, struct acm_wb *wb) argument
229 acm_start_wb(struct acm *acm, struct acm_wb *wb) argument
581 struct acm_wb *wb = urb->context; local
755 struct acm_wb *wb; local
810 struct acm_wb *wb; local
1132 struct acm_wb *wb; local
1151 struct acm_wb *wb; local
[all...]
/linux-master/drivers/media/platform/mediatek/vcodec/encoder/venc/
venc_vp8_if.c
155 struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs; local
158 if (wb[i].size == 0)
161 * This 'wb' structure is set by VPU side and shared to AP for
170 inst->work_bufs[i].size = wb[i].size;
189 wb[i].vpua);
190 memcpy(inst->work_bufs[i].va, tmp_va, wb[i].size);
192 wb[i].iova = inst->work_bufs[i].dma_addr;
/linux-master/include/uapi/drm/
lima_drm.h
84 __u32 wb[3 * LIMA_PP_WB_REG_NUM]; member in struct:drm_lima_m400_pp_frame
93 __u32 wb[3 * LIMA_PP_WB_REG_NUM]; member in struct:drm_lima_m450_pp_frame

