Search scope: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/fs/nilfs2/

Lines Matching defs:sci
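
The excerpts below are from the NILFS2 segment constructor in this 2.6-based kernel tree; each hit shows the original source line number followed by the matching line, and every match defines or binds `sci`, a pointer to the segment constructor's state object, `struct nilfs_sc_info`. For orientation, here is a partial sketch of that structure, limited to the fields the excerpts actually touch; the field types and grouping are inferred from how the fields are used below, not copied from the header, so treat it as an approximation.

    /* Partial sketch of struct nilfs_sc_info; only fields referenced in the
     * excerpts below are shown, and the types are inferred from usage. */
    struct nilfs_sc_info {
            struct super_block      *sc_super;         /* VFS super block */
            struct nilfs_sb_info    *sc_sbi;           /* NILFS super block info */

            unsigned long            sc_nblk_inc;      /* blocks written so far */
            unsigned long            sc_nblk_this_inc; /* blocks in this construction */

            struct list_head         sc_dirty_files;   /* inodes with dirty blocks */
            struct list_head         sc_gc_inodes;     /* inodes handed over by GC */
            struct list_head         sc_copied_buffers;/* buffers copied for writeout */

            struct nilfs_inode_info *sc_dsync_inode;   /* target of a data-sync write */
            loff_t                   sc_dsync_start, sc_dsync_end;

            struct nilfs_cstage      sc_stage;         /* scnt, flags, resume pointers */

            struct list_head         sc_segbufs;       /* segment buffers being filled */
            struct list_head         sc_write_logs;    /* segment buffers under writeback */
            unsigned long            sc_segbuf_nblocks;
            struct nilfs_segment_buffer *sc_curseg;    /* current segment buffer */

            struct nilfs_segsum_pointer sc_finfo_ptr;  /* current finfo position */
            struct nilfs_segsum_pointer sc_binfo_ptr;  /* next binfo slot */
            unsigned long            sc_blk_cnt, sc_datablk_cnt;

            time_t                   sc_seg_ctime;     /* creation time of this log */

            __u64                   *sc_freesegs;      /* segments to free (GC ioctl) */
            size_t                   sc_nfreesegs;

            unsigned long            sc_flags;         /* NILFS_SC_* status bits */

            spinlock_t               sc_state_lock;
            unsigned long            sc_state;         /* NILFS_SEGCTOR_COMMIT / _QUIT */
            unsigned long            sc_flush_request; /* per-inode flush request bits */

            wait_queue_head_t        sc_wait_request;  /* waiters for a construction */
            wait_queue_head_t        sc_wait_daemon;   /* segctord sleeps here */
            wait_queue_head_t        sc_wait_task;     /* thread start/stop handshake */
            __u32                    sc_seq_request, sc_seq_accepted, sc_seq_done;

            unsigned long            sc_interval;      /* construction interval (jiffies) */
            unsigned long            sc_mjcp_freq;     /* major checkpoint frequency */
            unsigned long            sc_lseg_stime;    /* start time of the open log */
            unsigned long            sc_watermark;     /* dirty-block flush threshold */

            struct timer_list        sc_timer;
            struct task_struct      *sc_task;          /* segctord thread */
    };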

227 	struct nilfs_sc_info *sci;
237 sci = NILFS_SC(sbi);
238 if (sci != NULL) {
240 nilfs_segctor_start_timer(sci);
242 sci->sc_watermark)
243 nilfs_segctor_do_flush(sci, 0);
274 struct nilfs_sc_info *sci = NILFS_SC(sbi);
277 if (!sci || !sci->sc_flush_request)
280 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
284 if (sci->sc_flush_request &&
285 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
289 nilfs_segctor_do_immediate_flush(sci);
336 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
340 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
341 unsigned blocksize = sci->sc_super->s_blocksize;
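
nilfs_segctor_map_segsum_entry (lines 336-341 above) hands out space inside the segment summary area of the current segment buffer: the `nilfs_segsum_pointer` carries a buffer head and a byte offset, and the mapping moves to the next summary block whenever an entry would cross the block boundary. A minimal sketch of that walk, assuming a hypothetical helper `next_summary_block()` for fetching the following summary buffer:

    /* Sketch only; error handling and the case where a new summary block has
     * to be appended to the segment buffer are left out. */
    static void *map_segsum_entry_sketch(struct nilfs_sc_info *sci,
                                         struct nilfs_segsum_pointer *ssp,
                                         unsigned bytes)
    {
            unsigned blocksize = sci->sc_super->s_blocksize;
            void *p;

            if (ssp->offset + bytes > blocksize) {
                    /* entry would straddle the block: start a fresh block */
                    ssp->offset = 0;
                    ssp->bh = next_summary_block(sci->sc_curseg, ssp->bh); /* hypothetical */
            }
            p = ssp->bh->b_data + ssp->offset;
            ssp->offset += bytes;
            return p;
    }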
357 * @sci: nilfs_sc_info
359 static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
361 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
369 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime,
370 sci->sc_sbi->s_nilfs->ns_cno);
376 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
377 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
378 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
382 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
384 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
385 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
388 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
389 return nilfs_segctor_reset_segment_buffer(sci);
392 static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
394 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
398 err = nilfs_segctor_feed_segment(sci);
401 segbuf = sci->sc_curseg;
413 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
416 unsigned blocksize = sci->sc_super->s_blocksize;
420 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
424 static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
427 sci->sc_curseg->sb_sum.nfinfo++;
428 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
430 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
432 if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
433 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
437 static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
444 if (sci->sc_blk_cnt == 0)
448 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
451 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
452 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
455 segbuf = sci->sc_curseg;
456 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
457 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
458 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
459 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
462 static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
471 segbuf = sci->sc_curseg;
473 sci, &sci->sc_binfo_ptr, binfo_size);
475 nilfs_segctor_end_finfo(sci, inode);
476 err = nilfs_segctor_feed_segment(sci);
486 if (sci->sc_blk_cnt == 0)
487 nilfs_segctor_begin_finfo(sci, inode);
489 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
492 sci->sc_blk_cnt++;
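
nilfs_segctor_add_file_block (lines 462-492 above) is the admission path for every collected block: it checks whether one more binfo entry, plus a leading finfo when this is the file's first block, still fits into the current segment, rolls over to the next segment buffer when it does not, and only then records the block. A hedged outline of that flow, paraphrased from the upstream 2.6 sources; the goto-based retry and the summary-area extension are elided:

    /* Outline only; the capacity check and helper names follow the upstream
     * 2.6 code and are not all visible in this listing. */
    static int add_file_block_outline(struct nilfs_sc_info *sci,
                                      struct buffer_head *bh,
                                      struct inode *inode,
                                      unsigned binfo_size)
    {
            struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
            int required, err;

            /* does one more binfo (plus a finfo for the file's first block)
             * require an extra summary block? */
            required = nilfs_segctor_segsum_block_required(sci, &sci->sc_binfo_ptr,
                                                           binfo_size);

            if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
                    /* segment full: close the finfo, feed the next segment
                     * buffer and (in the real code) retry */
                    nilfs_segctor_end_finfo(sci, inode);
                    err = nilfs_segctor_feed_segment(sci);
                    if (err)
                            return err;
            }

            if (sci->sc_blk_cnt == 0)                 /* first block of this file */
                    nilfs_segctor_begin_finfo(sci, inode);

            nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
            nilfs_segbuf_add_file_buffer(sci->sc_curseg, bh);  /* queue payload block */
            sci->sc_blk_cnt++;
            return 0;
    }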
511 static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
519 sci->sc_super);
521 err = nilfs_segctor_add_file_block(sci, bh, inode,
524 sci->sc_datablk_cnt++;
528 static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
537 sci->sc_super);
541 static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
546 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
549 static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
554 sci, ssp, sizeof(*binfo_v));
558 static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
563 sci, ssp, sizeof(*vblocknr));
575 static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
583 sci->sc_super);
585 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
587 sci->sc_datablk_cnt++;
591 static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
595 return nilfs_segctor_add_file_block(sci, bh, inode,
599 static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
603 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
608 static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
613 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
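
The collect_* and write_*_binfo helpers above are not called directly; they are wired into a per-file-type operation table that nilfs_segctor_scan_file drives (the `sc_ops->collect_data` and `sc_op->write_data_binfo` calls appear further down in this listing). A sketch of the table's shape, with the member signatures read off the call sites here; treat the declaration as approximate:

    /* Approximate shape of the callback table used by the scan loop. */
    struct nilfs_sc_operations {
            int  (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
                                 struct inode *);
            int  (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
                                 struct inode *);
            int  (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
                                 struct inode *);
            void (*write_data_binfo)(struct nilfs_sc_info *,
                                     struct nilfs_segsum_pointer *,
                                     union nilfs_binfo *);
            void (*write_node_binfo)(struct nilfs_sc_info *,
                                     struct nilfs_segsum_pointer *,
                                     union nilfs_binfo *);
    };

    /* e.g. a table wiring up the regular-file helpers listed above */
    static struct nilfs_sc_operations file_ops_sketch = {
            .collect_data     = nilfs_collect_file_data,
            .collect_node     = nilfs_collect_file_node,
            .collect_bmap     = nilfs_collect_file_bmap,
            .write_data_binfo = nilfs_write_file_data_binfo,
            .write_node_binfo = nilfs_write_file_node_binfo,
    };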
775 static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
777 return list_empty(&sci->sc_dirty_files) &&
778 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
779 sci->sc_nfreesegs == 0 &&
780 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
783 static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
785 struct nilfs_sb_info *sbi = sci->sc_sbi;
789 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
792 if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
799 static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
801 struct nilfs_sb_info *sbi = sci->sc_sbi;
810 static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
812 struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
833 static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
835 struct nilfs_sb_info *sbi = sci->sc_sbi;
854 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
855 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
858 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
888 static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
893 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
899 static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
906 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
912 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
946 static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
959 err = collect(sci, bh, inode);
977 static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
980 return sci->sc_segbuf_nblocks -
981 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
984 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
992 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
993 size_t n, rest = nilfs_segctor_buffer_rest(sci);
999 sci, inode, &data_buffers,
1007 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1009 sci, inode, &data_buffers, sc_ops->collect_data);
1013 sci, inode, &node_buffers, NULL);
1016 sci->sc_stage.flags |= NILFS_CF_NODE;
1020 sci, inode, &node_buffers, sc_ops->collect_node);
1026 sci, inode, &node_buffers, sc_ops->collect_bmap);
1030 nilfs_segctor_end_finfo(sci, inode);
1031 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1037 static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1041 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1045 sci->sc_dsync_start,
1046 sci->sc_dsync_end);
1048 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1051 nilfs_segctor_end_finfo(sci, inode);
1058 static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1060 struct nilfs_sb_info *sbi = sci->sc_sbi;
1067 switch (sci->sc_stage.scnt) {
1070 sci->sc_stage.flags = 0;
1072 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1073 sci->sc_nblk_inc = 0;
1074 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1076 sci->sc_stage.scnt = NILFS_ST_DSYNC;
1081 sci->sc_stage.dirty_file_ptr = NULL;
1082 sci->sc_stage.gc_inode_ptr = NULL;
1084 sci->sc_stage.scnt = NILFS_ST_DAT;
1087 sci->sc_stage.scnt++; /* Fall through */
1090 head = &sci->sc_gc_inodes;
1091 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1095 sci, &ii->vfs_inode,
1098 sci->sc_stage.gc_inode_ptr = list_entry(
1106 sci->sc_stage.gc_inode_ptr = NULL;
1108 sci->sc_stage.scnt++; /* Fall through */
1110 head = &sci->sc_dirty_files;
1111 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1116 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1119 sci->sc_stage.dirty_file_ptr =
1125 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1127 sci->sc_stage.dirty_file_ptr = NULL;
1129 sci->sc_stage.scnt = NILFS_ST_DONE;
1132 sci->sc_stage.scnt++;
1133 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1136 err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
1140 sci->sc_stage.scnt++;
1142 err = nilfs_segctor_create_checkpoint(sci);
1147 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1151 sci->sc_stage.scnt++; /* Fall through */
1153 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1154 sci->sc_nfreesegs, &ndone);
1157 sci->sc_freesegs, ndone,
1161 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1163 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1167 sci->sc_stage.scnt++; /* Fall through */
1170 err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
1175 sci->sc_stage.scnt = NILFS_ST_DONE;
1178 sci->sc_stage.scnt++; /* Fall through */
1182 err = nilfs_segctor_add_super_root(sci);
1187 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1188 sci->sc_stage.scnt = NILFS_ST_DONE;
1192 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1193 ii = sci->sc_dsync_inode;
1197 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1200 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1201 sci->sc_stage.scnt = NILFS_ST_DONE;
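
nilfs_segctor_collect_blocks (lines 1058-1201 above) is a resumable state machine: `sc_stage.scnt` records how far collection has progressed, so when the current segment fills up the caller can extend the segment-buffer list and re-enter at the same stage, and the dirty_file_ptr/gc_inode_ptr fields remember where an interrupted list walk should resume. The constants visible above are NILFS_ST_INIT, NILFS_ST_DSYNC, NILFS_ST_DAT, NILFS_ST_CPFILE and NILFS_ST_DONE; the remaining names in the sketch below (GC, FILE, IFILE, SUFILE, SR) are my assumption, reconstructed from the fall-through order of the switch:

    /* Assumed stage ordering; only the names marked "confirmed" actually
     * appear in this listing. */
    enum {
            NILFS_ST_INIT = 0,   /* confirmed: initial checks                    */
            NILFS_ST_GC,         /* assumed:   inodes handed over by the cleaner */
            NILFS_ST_FILE,       /* assumed:   regular dirty files               */
            NILFS_ST_IFILE,      /* assumed:   inode file metadata               */
            NILFS_ST_CPFILE,     /* confirmed: checkpoint file                   */
            NILFS_ST_SUFILE,     /* assumed:   segment usage file                */
            NILFS_ST_DAT,        /* confirmed: DAT (virtual block translation)   */
            NILFS_ST_SR,         /* assumed:   super root                        */
            NILFS_ST_DSYNC,      /* confirmed: data-sync side branch             */
            NILFS_ST_DONE,       /* confirmed                                    */
    };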
1215 * @sci: nilfs_sc_info
1218 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1225 segbuf = nilfs_segbuf_new(sci->sc_super);
1229 if (list_empty(&sci->sc_write_logs)) {
1245 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1268 BUG_ON(!list_empty(&sci->sc_segbufs));
1269 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1270 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1278 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1287 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1301 segbuf = nilfs_segbuf_new(sci->sc_super);
1307 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1320 list_splice_tail(&list, &sci->sc_segbufs);
1371 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1378 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1383 sci->sc_seg_ctime);
1406 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1413 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1414 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1418 nilfs_truncate_logs(&sci->sc_segbufs, last);
1422 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1425 struct nilfs_cstage prev_stage = sci->sc_stage;
1430 sci->sc_nblk_this_inc = 0;
1431 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1433 err = nilfs_segctor_reset_segment_buffer(sci);
1437 err = nilfs_segctor_collect_blocks(sci, mode);
1438 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1446 if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
1449 nilfs_clear_logs(&sci->sc_segbufs);
1451 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1455 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1457 sci->sc_freesegs,
1458 sci->sc_nfreesegs,
1463 sci->sc_stage = prev_stage;
1465 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1482 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1510 sci, &ssp, sizeof(*finfo));
1538 sc_op->write_data_binfo(sci, &ssp, &binfo);
1540 sc_op->write_node_binfo(sci, &ssp, &binfo);
1554 err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
1558 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1563 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1564 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1640 static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
1645 struct list_head *list = &sci->sc_copied_buffers;
1649 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1700 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1705 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1706 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1828 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1834 list_splice_tail_init(&sci->sc_write_logs, &logs);
1838 list_splice_tail_init(&sci->sc_segbufs, &logs);
1841 nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
1843 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1845 sci->sc_freesegs,
1846 sci->sc_nfreesegs,
1865 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1869 struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
1872 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1917 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1918 sci->sc_lseg_stime = jiffies;
1921 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1933 nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
1935 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1938 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1942 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1944 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1946 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1953 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1954 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1955 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1956 nilfs_segctor_clear_metadata_dirty(sci);
1958 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1961 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1965 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1967 nilfs_segctor_complete_write(sci);
1968 nilfs_destroy_logs(&sci->sc_write_logs);
1973 static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
2008 list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
2017 static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
2025 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
2047 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2049 struct nilfs_sb_info *sbi = sci->sc_sbi;
2054 sci->sc_stage.scnt = NILFS_ST_INIT;
2056 err = nilfs_segctor_check_in_files(sci, sbi);
2061 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2063 if (nilfs_segctor_clean(sci))
2067 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2069 err = nilfs_segctor_begin_construction(sci, nilfs);
2074 sci->sc_seg_ctime = get_seconds();
2076 err = nilfs_segctor_collect(sci, nilfs, mode);
2081 if (sci->sc_stage.scnt == NILFS_ST_DONE &&
2082 nilfs_segbuf_empty(sci->sc_curseg)) {
2083 nilfs_segctor_abort_construction(sci, nilfs, 1);
2087 err = nilfs_segctor_assign(sci, mode);
2091 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2092 nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);
2095 sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
2096 err = nilfs_segctor_fill_in_checkpoint(sci);
2100 nilfs_segctor_fill_in_super_root(sci, nilfs);
2102 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2105 err = nilfs_segctor_prepare_write(sci, &failed_page);
2107 nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
2111 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2114 err = nilfs_segctor_write(sci, nilfs);
2118 if (sci->sc_stage.scnt == NILFS_ST_DONE ||
2127 err = nilfs_segctor_wait(sci);
2131 } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2134 nilfs_segctor_check_out_files(sci, sbi);
2138 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2139 nilfs_redirty_inodes(&sci->sc_dirty_files);
2143 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2144 nilfs_segctor_abort_construction(sci, nilfs, err);
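
Taken together, nilfs_segctor_do_construct (lines 2047-2144 above) is the whole write pipeline, looping until `sc_stage.scnt` reaches NILFS_ST_DONE, i.e. until collection stops spilling into freshly extended segments. A condensed, non-buildable outline of one pass, based purely on the call order visible above; error paths, the retry loop and the empty-segment early exit are omitted:

    /* One pass of the construction pipeline, paraphrased from the excerpts. */
    nilfs_segctor_check_in_files(sci, sbi);              /* claim dirty inodes        */
    nilfs_segctor_begin_construction(sci, nilfs);        /* set up segment buffers    */
    sci->sc_seg_ctime = get_seconds();

    nilfs_segctor_collect(sci, nilfs, mode);             /* run the stage machine     */
    nilfs_segctor_assign(sci, mode);                     /* assign disk block numbers */
    nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);  /* if NILFS_CF_IFILE_STARTED */
    nilfs_segctor_fill_in_checkpoint(sci);               /* if past NILFS_ST_CPFILE   */
    nilfs_segctor_fill_in_super_root(sci, nilfs);        /* SC_LSEG_SR only           */
    nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

    nilfs_segctor_prepare_write(sci, &failed_page);
    nilfs_add_checksums_on_logs(&sci->sc_segbufs, ...);  /* seed argument elided above */
    nilfs_segctor_write(sci, nilfs);                     /* submit the logs           */
    nilfs_segctor_wait(sci);                             /* wait, then complete_write */

    nilfs_segctor_check_out_files(sci, sbi);             /* release collected inodes  */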
2150 * @sci: nilfs_sc_info
2156 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2158 spin_lock(&sci->sc_state_lock);
2159 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2160 sci->sc_timer.expires = jiffies + sci->sc_interval;
2161 add_timer(&sci->sc_timer);
2162 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2164 spin_unlock(&sci->sc_state_lock);
2167 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2169 spin_lock(&sci->sc_state_lock);
2170 if (!(sci->sc_flush_request & (1 << bn))) {
2171 unsigned long prev_req = sci->sc_flush_request;
2173 sci->sc_flush_request |= (1 << bn);
2175 wake_up(&sci->sc_wait_daemon);
2177 spin_unlock(&sci->sc_state_lock);
2188 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2190 if (!sci || nilfs_doing_construction())
2192 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
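
nilfs_segctor_do_flush (lines 2167-2177 above) records a flush request as a bit in `sc_flush_request` and wakes segctord only when no request was pending before (that is what the `prev_req` snapshot at line 2171 is for); nilfs_flush_segment picks the bit index at line 2192, using the metadata inode number for NILFS metadata files and 0 for ordinary file data. The FLUSH_FILE_BIT and FLUSH_DAT_BIT masks that show up later in this listing are presumably defined along these lines:

    /* Assumed definitions; the values follow from how the bit index is chosen
     * in nilfs_flush_segment() and tested against the DAT flush later on. */
    #define FLUSH_FILE_BIT  (0x1)                 /* plain file data (bit 0) */
    #define FLUSH_DAT_BIT   (1 << NILFS_DAT_INO)  /* the DAT metadata inode  */

    /* request pattern, as in nilfs_segctor_do_flush(sci, bn): */
    spin_lock(&sci->sc_state_lock);
    if (!(sci->sc_flush_request & (1 << bn))) {
            unsigned long prev_req = sci->sc_flush_request;

            sci->sc_flush_request |= (1 << bn);
            if (!prev_req)                        /* first pending request only */
                    wake_up(&sci->sc_wait_daemon);
    }
    spin_unlock(&sci->sc_state_lock);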
2203 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2208 spin_lock(&sci->sc_state_lock);
2212 wait_req.seq = ++sci->sc_seq_request;
2213 spin_unlock(&sci->sc_state_lock);
2216 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2218 wake_up(&sci->sc_wait_daemon);
2232 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2236 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2241 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2242 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2245 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2255 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
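
nilfs_segctor_sync and nilfs_segctor_wakeup (lines 2203-2255 above) implement a ticket handshake between callers and segctord: a caller takes a ticket by incrementing `sc_seq_request`, wakes the daemon and sleeps on `sc_wait_request` until `sc_seq_done` has advanced past its ticket (nilfs_cnt32_ge handles 32-bit wrap-around); when a construction completes, the notify path copies `sc_seq_accepted` into `sc_seq_done` and the wakeup walks the wait queue, releasing every waiter whose ticket is now covered. A caller-side sketch, with the custom wait-queue entry of the real code simplified to a plain condition wait:

    /* Simplified sketch; the real code embeds the sequence number in a custom
     * wait-queue entry so the wakeup side can complete waiters selectively. */
    __u32 my_seq;

    spin_lock(&sci->sc_state_lock);
    my_seq = ++sci->sc_seq_request;                  /* take a ticket */
    spin_unlock(&sci->sc_state_lock);

    wake_up(&sci->sc_wait_daemon);                   /* make sure segctord runs */
    wait_event(sci->sc_wait_request,
               nilfs_cnt32_ge(sci->sc_seq_done, my_seq));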
2278 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2282 if (!sci)
2288 err = nilfs_segctor_sync(sci);
2316 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2321 if (!sci)
2329 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2332 err = nilfs_segctor_sync(sci);
2344 sci->sc_dsync_inode = ii;
2345 sci->sc_dsync_start = start;
2346 sci->sc_dsync_end = end;
2348 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2359 * @sci: segment constructor object
2361 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2363 spin_lock(&sci->sc_state_lock);
2364 sci->sc_seq_accepted = sci->sc_seq_request;
2365 spin_unlock(&sci->sc_state_lock);
2366 del_timer_sync(&sci->sc_timer);
2371 * @sci: segment constructor object
2375 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2378 spin_lock(&sci->sc_state_lock);
2381 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2382 sci->sc_seq_done = sci->sc_seq_accepted;
2383 nilfs_segctor_wakeup(sci, err);
2384 sci->sc_flush_request = 0;
2387 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2389 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2392 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2393 time_before(jiffies, sci->sc_timer.expires))
2394 add_timer(&sci->sc_timer);
2396 spin_unlock(&sci->sc_state_lock);
2401 * @sci: segment constructor object
2404 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2406 struct nilfs_sb_info *sbi = sci->sc_sbi;
2411 nilfs_segctor_accept(sci);
2415 if (!nilfs_segctor_confirm(sci))
2416 err = nilfs_segctor_do_construct(sci, mode);
2421 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2435 nilfs_segctor_notify(sci, mode, err);
2463 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2468 if (unlikely(!sci))
2481 sci->sc_freesegs = kbufs[4];
2482 sci->sc_nfreesegs = argv[4].v_nmembs;
2483 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2486 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2487 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2495 schedule_timeout(sci->sc_interval);
2498 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2499 sci->sc_nfreesegs);
2509 sci->sc_freesegs = NULL;
2510 sci->sc_nfreesegs = 0;
2516 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2518 struct nilfs_sb_info *sbi = sci->sc_sbi;
2522 nilfs_segctor_construct(sci, mode);
2529 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2530 nilfs_segctor_start_timer(sci);
2535 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2540 spin_lock(&sci->sc_state_lock);
2541 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2543 spin_unlock(&sci->sc_state_lock);
2546 err = nilfs_segctor_do_construct(sci, mode);
2548 spin_lock(&sci->sc_state_lock);
2549 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2551 spin_unlock(&sci->sc_state_lock);
2553 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2556 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2558 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2559 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2560 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2562 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2577 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2578 struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
2581 sci->sc_timer.data = (unsigned long)current;
2582 sci->sc_timer.function = nilfs_construction_timeout;
2585 sci->sc_task = current;
2586 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2590 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2592 spin_lock(&sci->sc_state_lock);
2597 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2600 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2602 else if (!sci->sc_flush_request)
2605 mode = nilfs_segctor_flush_mode(sci);
2607 spin_unlock(&sci->sc_state_lock);
2608 nilfs_segctor_thread_construct(sci, mode);
2609 spin_lock(&sci->sc_state_lock);
2615 spin_unlock(&sci->sc_state_lock);
2617 spin_lock(&sci->sc_state_lock);
2622 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2625 if (sci->sc_seq_request != sci->sc_seq_done)
2627 else if (sci->sc_flush_request)
2629 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2631 sci->sc_timer.expires);
2634 spin_unlock(&sci->sc_state_lock);
2636 spin_lock(&sci->sc_state_lock);
2638 finish_wait(&sci->sc_wait_daemon, &wait);
2639 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2640 time_after_eq(jiffies, sci->sc_timer.expires));
2648 spin_unlock(&sci->sc_state_lock);
2651 sci->sc_task = NULL;
2652 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
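
segctord itself (nilfs_segctor_thread, lines 2577-2652 above) is a plain work loop: holding `sc_state_lock`, it decides whether there is work to do (an expired timer, outstanding sequence tickets, or flush-request bits), drops the lock to run nilfs_segctor_thread_construct, and otherwise sleeps on `sc_wait_daemon`, bounding the sleep by `sc_timer.expires` whenever NILFS_SEGCTOR_COMMIT is set. A condensed paraphrase of the work-selection step; the label names are mine:

    /* Paraphrase of the daemon's decision logic (locking and freezer support
     * omitted; `sleep` and `end_thread` are illustrative labels). */
    if (sci->sc_state & NILFS_SEGCTOR_QUIT)
            goto end_thread;                           /* unmount in progress */

    if (timeout || sci->sc_seq_request != sci->sc_seq_done)
            mode = SC_LSEG_SR;                         /* full construction with super root */
    else if (!sci->sc_flush_request)
            goto sleep;                                /* nothing to do */
    else
            mode = nilfs_segctor_flush_mode(sci);      /* partial flush (file or DAT) */

    nilfs_segctor_thread_construct(sci, mode);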
2656 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2660 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2668 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2672 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2674 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2676 while (sci->sc_task) {
2677 wake_up(&sci->sc_wait_daemon);
2678 spin_unlock(&sci->sc_state_lock);
2679 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2680 spin_lock(&sci->sc_state_lock);
2689 struct nilfs_sc_info *sci;
2691 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2692 if (!sci)
2695 sci->sc_sbi = sbi;
2696 sci->sc_super = sbi->s_super;
2698 init_waitqueue_head(&sci->sc_wait_request);
2699 init_waitqueue_head(&sci->sc_wait_daemon);
2700 init_waitqueue_head(&sci->sc_wait_task);
2701 spin_lock_init(&sci->sc_state_lock);
2702 INIT_LIST_HEAD(&sci->sc_dirty_files);
2703 INIT_LIST_HEAD(&sci->sc_segbufs);
2704 INIT_LIST_HEAD(&sci->sc_write_logs);
2705 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2706 INIT_LIST_HEAD(&sci->sc_copied_buffers);
2707 init_timer(&sci->sc_timer);
2709 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2710 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2711 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2714 sci->sc_interval = sbi->s_interval;
2716 sci->sc_watermark = sbi->s_watermark;
2717 return sci;
2720 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2727 struct nilfs_sb_info *sbi = sci->sc_sbi;
2731 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2739 * @sci: nilfs_sc_info
2745 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2747 struct nilfs_sb_info *sbi = sci->sc_sbi;
2752 spin_lock(&sci->sc_state_lock);
2753 nilfs_segctor_kill_thread(sci);
2754 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2755 || sci->sc_seq_request != sci->sc_seq_done);
2756 spin_unlock(&sci->sc_state_lock);
2758 if (flag || !nilfs_segctor_confirm(sci))
2759 nilfs_segctor_write_out(sci);
2761 WARN_ON(!list_empty(&sci->sc_copied_buffers));
2763 if (!list_empty(&sci->sc_dirty_files)) {
2766 nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
2769 WARN_ON(!list_empty(&sci->sc_segbufs));
2770 WARN_ON(!list_empty(&sci->sc_write_logs));
2774 del_timer_sync(&sci->sc_timer);
2775 kfree(sci);
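
Setup and teardown bracket everything above: nilfs_segctor_new allocates the object and initializes its lists, wait queues, timer and the default interval/frequency/watermark (optionally overridden by mount-time settings in `sbi`), nilfs_segctor_start_thread launches segctord, and nilfs_segctor_destroy asks the thread to quit, writes out any remaining dirty state, and frees the object. A sketch of the expected call order from the mount and unmount paths; the framing around these three calls is my assumption, not part of this listing:

    /* Assumed lifecycle as seen from mount/unmount. */
    sci = nilfs_segctor_new(sbi);              /* kzalloc + list/timer init    */
    err = nilfs_segctor_start_thread(sci);     /* kthread_run(..., "segctord") */
    /* ... filesystem in use ... */
    nilfs_segctor_destroy(sci);                /* kill thread, final write-out,
                                                  del_timer_sync, kfree        */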