Lines matching refs:cc — every reference to the struct compress_ctx pointer cc in fs/f2fs/compress.c (Linux kernel), listed with its source line number.

52 int (*init_compress_ctx)(struct compress_ctx *cc);
53 void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 int (*compress_pages)(struct compress_ctx *cc);
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
63 return index & (cc->cluster_size - 1);
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
68 return index >> cc->log_cluster_size;
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
73 return cc->cluster_idx << cc->log_cluster_size;
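The three helpers above (lines 61-73) are plain power-of-two arithmetic: a cluster holds 1 << log_cluster_size pages, so masking gives the offset within a cluster, shifting right gives the cluster index, and shifting back left recovers the cluster's first page index. A minimal userspace sketch, assuming log_cluster_size = 2 (f2fs's usual default of 4 pages per cluster):

    #include <stdio.h>

    /* Sketch of the cluster math above; log_cluster_size = 2 is an
     * assumed default (4 pages per cluster). */
    int main(void)
    {
        unsigned int log_cluster_size = 2;
        unsigned int cluster_size = 1 << log_cluster_size;   /* 4 */
        unsigned long index = 11;        /* a page index within the file */

        unsigned long ofs   = index & (cluster_size - 1);    /* 3 */
        unsigned long cidx  = index >> log_cluster_size;     /* 2 */
        unsigned long start = cidx << log_cluster_size;      /* 8 */

        printf("page %lu: cluster %lu, offset %lu, cluster starts at page %lu\n",
               index, cidx, ofs, start);
        return 0;
    }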
100 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
105 if (!cc->rpages[i])
108 unlock_page(cc->rpages[i]);
110 put_page(cc->rpages[i]);
114 static void f2fs_put_rpages(struct compress_ctx *cc)
116 f2fs_drop_rpages(cc, cc->cluster_size, false);
119 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
121 f2fs_drop_rpages(cc, len, true);
124 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
129 for (i = 0; i < cc->cluster_size; i++) {
130 if (!cc->rpages[i])
133 redirty_page_for_writepage(wbc, cc->rpages[i]);
134 f2fs_put_page(cc->rpages[i], unlock);
143 int f2fs_init_compress_ctx(struct compress_ctx *cc)
145 if (cc->rpages)
148 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
149 return cc->rpages ? 0 : -ENOMEM;
152 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
154 page_array_free(cc->inode, cc->rpages, cc->cluster_size);
155 cc->rpages = NULL;
156 cc->nr_rpages = 0;
157 cc->nr_cpages = 0;
158 cc->valid_nr_cpages = 0;
160 cc->cluster_idx = NULL_CLUSTER;
163 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
167 if (!f2fs_cluster_can_merge_page(cc, page->index))
168 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
170 cluster_ofs = offset_in_cluster(cc, page->index);
171 cc->rpages[cluster_ofs] = page;
172 cc->nr_rpages++;
173 cc->cluster_idx = cluster_idx(cc, page->index);
177 static int lzo_init_compress_ctx(struct compress_ctx *cc)
179 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
181 if (!cc->private)
184 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
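lzo_init_compress_ctx() sizes cc->clen with lzo1x_worst_compress(), LZO's worst-case expansion bound. As far as I recall, include/linux/lzo.h defines it as x + x/16 + 64 + 3; the macro below mirrors that recollection (verify against your tree). For a 4-page cluster of 4 KiB pages:

    #include <stdio.h>

    /* Assumed to mirror include/linux/lzo.h (check your kernel tree). */
    #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned long rlen = page_size << 2;  /* 4-page cluster: 16384 bytes */

        /* 16384 + 1024 + 64 + 3 = 17475 bytes reserved via cc->clen */
        printf("worst-case LZO output for %lu bytes: %lu\n",
               rlen, lzo1x_worst_compress(rlen));
        return 0;
    }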
188 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
190 kvfree(cc->private);
191 cc->private = NULL;
194 static int lzo_compress_pages(struct compress_ctx *cc)
198 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
199 &cc->clen, cc->private);
202 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
240 static int lz4_init_compress_ctx(struct compress_ctx *cc)
245 if (F2FS_I(cc->inode)->i_compress_level)
249 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
250 if (!cc->private)
254 * we do not change cc->clen to LZ4_compressBound(inputsize) to
258 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
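The cap at line 258 is deliberate: rather than reserving LZ4_compressBound() worth of output space, f2fs only accepts a result that frees at least one full page after the compression header, since anything larger would occupy just as many on-disk blocks as the raw data. A worked sketch of that budget (the 16-byte COMPRESS_HEADER_SIZE here is an illustrative assumption, not the real f2fs value):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned long header = 16;            /* assumed COMPRESS_HEADER_SIZE */
        unsigned long rlen = page_size << 2;  /* 4-page cluster: 16384 bytes */
        unsigned long clen_cap = rlen - page_size - header;

        /* Output larger than this would still need all 4 on-disk blocks,
         * so the compressor is told to give up instead of trying harder. */
        printf("compressed data must fit in %lu bytes to save a block\n",
               clen_cap);
        return 0;
    }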
262 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
264 kvfree(cc->private);
265 cc->private = NULL;
268 static int lz4_compress_pages(struct compress_ctx *cc)
271 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
274 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
275 cc->clen, cc->private);
278 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
279 cc->clen, level, cc->private);
286 cc->clen = len;
331 static int zstd_init_compress_ctx(struct compress_ctx *cc)
337 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
343 params = zstd_get_params(level, cc->rlen);
346 workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
354 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
360 cc->private = workspace;
361 cc->private2 = stream;
363 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
367 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
369 kvfree(cc->private);
370 cc->private = NULL;
371 cc->private2 = NULL;
374 static int zstd_compress_pages(struct compress_ctx *cc)
376 zstd_cstream *stream = cc->private2;
379 int src_size = cc->rlen;
384 inbuf.src = cc->rbuf;
388 outbuf.dst = cc->cbuf->cdata;
394 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
402 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
414 cc->clen = outbuf.pos;
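zstd_compress_pages() drives the kernel's streaming wrappers over a single inbuf/outbuf pair and records outbuf.pos as the compressed length. The sketch below shows the same pattern with the userspace libzstd API, whose names differ from the kernel's zstd_* wrappers; build with -lzstd:

    #include <stdio.h>
    #include <zstd.h>

    /* Userspace analogue of the zstd_compress_pages() flow above: one input
     * buffer, one output buffer, flush, and treat "output full" as failure. */
    static int compress_once(const void *src, size_t srclen,
                             void *dst, size_t dstcap, size_t *clen)
    {
        ZSTD_CStream *stream = ZSTD_createCStream();
        ZSTD_inBuffer inbuf = { src, srclen, 0 };
        ZSTD_outBuffer outbuf = { dst, dstcap, 0 };
        size_t ret;

        if (!stream)
            return -1;
        ZSTD_initCStream(stream, 3);             /* compression level 3 */

        ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
        if (ZSTD_isError(ret))
            goto fail;

        ret = ZSTD_endStream(stream, &outbuf);   /* flush the epilogue */
        if (ZSTD_isError(ret) || ret != 0)       /* ret != 0: outbuf too small */
            goto fail;

        *clen = outbuf.pos;                      /* mirrors cc->clen = outbuf.pos */
        ZSTD_freeCStream(stream);
        return 0;
    fail:
        ZSTD_freeCStream(stream);
        return -1;
    }

    int main(void)
    {
        char src[8192] = { 0 }, dst[4096];
        size_t clen;

        if (compress_once(src, sizeof(src), dst, sizeof(dst), &clen) == 0)
            printf("compressed %zu bytes into %zu\n", sizeof(src), clen);
        return 0;
    }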
508 static int lzorle_compress_pages(struct compress_ctx *cc)
512 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
513 &cc->clen, cc->private);
515 f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
624 static int f2fs_compress_pages(struct compress_ctx *cc)
626 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
633 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
634 cc->cluster_size, fi->i_compress_algorithm);
637 ret = cops->init_compress_ctx(cc);
642 max_len = COMPRESS_HEADER_SIZE + cc->clen;
643 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
644 cc->valid_nr_cpages = cc->nr_cpages;
646 cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
647 if (!cc->cpages) {
652 for (i = 0; i < cc->nr_cpages; i++)
653 cc->cpages[i] = f2fs_compress_alloc_page();
655 cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
656 if (!cc->rbuf) {
661 cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
662 if (!cc->cbuf) {
667 ret = cops->compress_pages(cc);
671 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
673 if (cc->clen > max_len) {
678 cc->cbuf->clen = cpu_to_le32(cc->clen);
681 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
682 cc->cbuf->cdata, cc->clen);
683 cc->cbuf->chksum = cpu_to_le32(chksum);
686 cc->cbuf->reserved[i] = cpu_to_le32(0);
688 new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
691 memset(&cc->cbuf->cdata[cc->clen], 0,
693 (cc->clen + COMPRESS_HEADER_SIZE));
695 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
696 vm_unmap_ram(cc->rbuf, cc->cluster_size);
698 for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
699 f2fs_compress_free_page(cc->cpages[i]);
700 cc->cpages[i] = NULL;
704 cops->destroy_compress_ctx(cc);
706 cc->valid_nr_cpages = new_nr_cpages;
708 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
709 cc->clen, ret);
713 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
715 vm_unmap_ram(cc->rbuf, cc->cluster_size);
717 for (i = 0; i < cc->nr_cpages; i++) {
718 if (cc->cpages[i])
719 f2fs_compress_free_page(cc->cpages[i]);
721 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
722 cc->cpages = NULL;
725 cops->destroy_compress_ctx(cc);
727 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
728 cc->clen, ret);
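After a successful compress_pages() call, f2fs_compress_pages() trims the cpage array: it recomputes how many pages the output actually needs (line 688), zeroes the unused tail of the last cpage so stale memory contents never reach disk (lines 691-693), and frees the surplus pages (lines 698-700). A worked example of that arithmetic, again with an assumed 16-byte header:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned long header = 16;   /* assumed COMPRESS_HEADER_SIZE */
        unsigned long clen = 5000;   /* a 16 KiB cluster compressed to 5000 B */

        unsigned long new_nr_cpages = DIV_ROUND_UP(clen + header, page_size);
        unsigned long tail = new_nr_cpages * page_size - (clen + header);

        /* keeps 2 of the 4 cpages and zeroes the 3176 unused tail bytes */
        printf("keep %lu cpages, zero %lu tail bytes\n", new_nr_cpages, tail);
        return 0;
    }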
826 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
828 if (cc->cluster_idx == NULL_CLUSTER)
830 return cc->cluster_idx == cluster_idx(cc, index);
833 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
835 return cc->nr_rpages == 0;
838 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
840 return cc->cluster_size == cc->nr_rpages;
843 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
845 if (f2fs_cluster_is_empty(cc))
847 return is_page_in_cluster(cc, index);
850 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
860 if (uptodate && (pgidx % cc->cluster_size))
863 if (nr_pages - index < cc->cluster_size)
866 for (; i < cc->cluster_size; i++) {
876 static bool cluster_has_invalid_data(struct compress_ctx *cc)
878 loff_t i_size = i_size_read(cc->inode);
882 for (i = 0; i < cc->cluster_size; i++) {
883 struct page *page = cc->rpages[i];
885 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
998 static int f2fs_compressed_blocks(struct compress_ctx *cc)
1000 return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
1011 static bool cluster_may_compress(struct compress_ctx *cc)
1013 if (!f2fs_need_compress_data(cc->inode))
1015 if (f2fs_is_atomic_file(cc->inode))
1017 if (!f2fs_cluster_is_full(cc))
1019 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1021 return !cluster_has_invalid_data(cc);
1024 static void set_cluster_writeback(struct compress_ctx *cc)
1028 for (i = 0; i < cc->cluster_size; i++) {
1029 if (cc->rpages[i])
1030 set_page_writeback(cc->rpages[i]);
1034 static void set_cluster_dirty(struct compress_ctx *cc)
1038 for (i = 0; i < cc->cluster_size; i++)
1039 if (cc->rpages[i]) {
1040 set_page_dirty(cc->rpages[i]);
1041 set_page_private_gcing(cc->rpages[i]);
1045 static int prepare_compress_overwrite(struct compress_ctx *cc,
1048 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1049 struct address_space *mapping = cc->inode->i_mapping;
1053 pgoff_t start_idx = start_idx_of_cluster(cc);
1057 ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1061 ret = f2fs_init_compress_ctx(cc);
1066 for (i = 0; i < cc->cluster_size; i++) {
1077 f2fs_compress_ctx_add_page(cc, page);
1080 if (!f2fs_cluster_is_empty(cc)) {
1083 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1085 f2fs_put_rpages(cc);
1086 f2fs_destroy_compress_ctx(cc, true);
1092 ret = f2fs_init_compress_ctx(cc);
1097 for (i = 0; i < cc->cluster_size; i++) {
1098 f2fs_bug_on(sbi, cc->rpages[i]);
1107 f2fs_compress_ctx_add_page(cc, page);
1111 f2fs_put_rpages(cc);
1112 f2fs_unlock_rpages(cc, i + 1);
1113 f2fs_destroy_compress_ctx(cc, true);
1119 *fsdata = cc->rpages;
1120 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1121 return cc->cluster_size;
1125 f2fs_put_rpages(cc);
1126 f2fs_unlock_rpages(cc, i);
1127 f2fs_destroy_compress_ctx(cc, true);
1135 struct compress_ctx cc = {
1144 return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1151 struct compress_ctx cc = {
1157 bool first_index = (index == cc.rpages[0]->index);
1160 set_cluster_dirty(&cc);
1162 f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1163 f2fs_destroy_compress_ctx(&cc, false);
1217 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1222 struct inode *inode = cc->inode;
1227 .ino = cc->inode->i_ino,
1238 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1244 pgoff_t start_idx = start_idx_of_cluster(cc);
1245 unsigned int last_index = cc->cluster_size - 1;
1252 mapping_set_error(cc->rpages[0]->mapping, -EIO);
1267 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1273 for (i = 0; i < cc->cluster_size; i++) {
1279 psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1293 atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1294 cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1298 cic->nr_rpages = cc->cluster_size;
1300 for (i = 0; i < cc->valid_nr_cpages; i++) {
1301 f2fs_set_compressed_page(cc->cpages[i], inode,
1302 cc->rpages[i + 1]->index, cic);
1303 fio.compressed_page = cc->cpages[i];
1312 fio.page = cc->rpages[i + 1];
1316 cc->cpages[i] = fio.encrypted_page;
1320 set_cluster_writeback(cc);
1322 for (i = 0; i < cc->cluster_size; i++)
1323 cic->rpages[i] = cc->rpages[i];
1325 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1329 fio.page = cc->rpages[i];
1345 if (i > cc->valid_nr_cpages) {
1356 fio.encrypted_page = cc->cpages[i - 1];
1358 fio.compressed_page = cc->cpages[i - 1];
1360 cc->cpages[i - 1] = NULL;
1364 inode_dec_dirty_pages(cc->inode);
1370 f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1371 add_compr_block_stat(inode, cc->valid_nr_cpages);
1373 set_inode_flag(cc->inode, FI_APPEND_WRITE);
1386 f2fs_put_rpages(cc);
1387 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1388 cc->cpages = NULL;
1389 f2fs_destroy_compress_ctx(cc, false);
1393 page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1396 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1407 for (i = 0; i < cc->valid_nr_cpages; i++) {
1408 f2fs_compress_free_page(cc->cpages[i]);
1409 cc->cpages[i] = NULL;
1411 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1412 cc->cpages = NULL;
1445 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1450 struct address_space *mapping = cc->inode->i_mapping;
1455 compr_blocks = f2fs_compressed_blocks(cc);
1457 for (i = 0; i < cc->cluster_size; i++) {
1458 if (!cc->rpages[i])
1461 redirty_page_for_writepage(wbc, cc->rpages[i]);
1462 unlock_page(cc->rpages[i]);
1472 for (i = 0; i < cc->cluster_size; i++) {
1473 if (!cc->rpages[i])
1476 lock_page(cc->rpages[i]);
1478 if (cc->rpages[i]->mapping != mapping) {
1480 unlock_page(cc->rpages[i]);
1484 if (!PageDirty(cc->rpages[i]))
1487 if (PageWriteback(cc->rpages[i])) {
1490 f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1493 if (!clear_page_dirty_for_io(cc->rpages[i]))
1496 ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
1501 unlock_page(cc->rpages[i]);
1510 if (IS_NOQUOTA(cc->inode))
1529 int f2fs_write_multi_pages(struct compress_ctx *cc,
1537 if (cluster_may_compress(cc)) {
1538 err = f2fs_compress_pages(cc);
1540 add_compr_block_stat(cc->inode, cc->cluster_size);
1543 f2fs_put_rpages_wbc(cc, wbc, true, 1);
1547 err = f2fs_write_compressed_pages(cc, submitted,
1551 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1554 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1556 err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1557 f2fs_put_rpages_wbc(cc, wbc, false, 0);
1559 f2fs_destroy_compress_ctx(cc, false);
1628 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1631 pgoff_t start_idx = start_idx_of_cluster(cc);
1632 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1639 dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1646 dic->inode = cc->inode;
1647 atomic_set(&dic->remaining_pages, cc->nr_cpages);
1648 dic->cluster_idx = cc->cluster_idx;
1649 dic->cluster_size = cc->cluster_size;
1650 dic->log_cluster_size = cc->log_cluster_size;
1651 dic->nr_cpages = cc->nr_cpages;
1654 dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1657 dic->rpages[i] = cc->rpages[i];
1658 dic->nr_rpages = cc->cluster_size;
1670 f2fs_set_compressed_page(page, cc->inode,