Search scope: /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/fs/gfs2/

Lines Matching defs:sdp

79 static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
95 error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
114 static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
124 spin_lock(&sdp->sd_quota_spin);
125 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
139 list_add(&qd->qd_list, &sdp->sd_quota_list);
140 atomic_inc(&sdp->sd_quota_count);
144 spin_unlock(&sdp->sd_quota_spin);
155 error = qd_alloc(sdp, user, id, &new_qd);
163 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
165 spin_lock(&sdp->sd_quota_spin);
166 gfs2_assert(sdp, qd->qd_count);
168 spin_unlock(&sdp->sd_quota_spin);
173 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
174 spin_lock(&sdp->sd_quota_spin);
175 gfs2_assert(sdp, qd->qd_count);
178 spin_unlock(&sdp->sd_quota_spin);
183 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
187 spin_lock(&sdp->sd_quota_spin);
190 spin_unlock(&sdp->sd_quota_spin);
194 for (c = 0; c < sdp->sd_quota_chunks; c++)
196 byte = sdp->sd_quota_bitmap[c][o];
209 if (qd->qd_slot >= sdp->sd_quota_slots)
212 sdp->sd_quota_bitmap[c][o] |= 1 << b;
214 spin_unlock(&sdp->sd_quota_spin);
220 spin_unlock(&sdp->sd_quota_spin);
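
The fragments from lines 194-220 come from the slot allocator, which scans page-sized bitmap chunks for a clear bit and claims it. The user-space sketch below only illustrates that scan pattern; it is not the kernel code, and CHUNK_BYTES, find_free_slot() and the driver in main() are invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_BYTES 4096        /* stand-in for PAGE_SIZE */

/* Scan chunk by chunk, byte by byte, for the first clear bit, claim
 * it, and return its slot number (or -1 if no slot below nslots is
 * free). */
static int find_free_slot(unsigned char **bitmap, unsigned int chunks,
                          unsigned int nslots)
{
	unsigned int c, o, b, slot;

	for (c = 0; c < chunks; c++) {
		for (o = 0; o < CHUNK_BYTES; o++) {
			unsigned char byte = bitmap[c][o];

			if (byte == 0xFF)       /* every bit in this byte taken */
				continue;
			for (b = 0; b < 8; b++)
				if (!(byte & (1 << b)))
					break;
			slot = (c * CHUNK_BYTES + o) * 8 + b;
			if (slot >= nslots)     /* past the last valid slot */
				return -1;
			bitmap[c][o] |= 1 << b; /* claim the slot */
			return (int)slot;
		}
	}
	return -1;
}

int main(void)
{
	unsigned char *chunk = calloc(1, CHUNK_BYTES);
	unsigned char *bitmap[] = { chunk };

	if (!chunk)
		return 1;
	printf("first free slot: %d\n", find_free_slot(bitmap, 1, 64));
	printf("next free slot:  %d\n", find_free_slot(bitmap, 1, 64));
	free(chunk);
	return 0;
}
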
226 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
228 spin_lock(&sdp->sd_quota_spin);
229 gfs2_assert(sdp, qd->qd_slot_count);
231 spin_unlock(&sdp->sd_quota_spin);
236 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
238 spin_lock(&sdp->sd_quota_spin);
239 gfs2_assert(sdp, qd->qd_slot_count);
241 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
244 spin_unlock(&sdp->sd_quota_spin);
249 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
250 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
256 mutex_lock(&sdp->sd_quota_mutex);
259 mutex_unlock(&sdp->sd_quota_mutex);
263 block = qd->qd_slot / sdp->sd_qc_per_block;
264 offset = qd->qd_slot % sdp->sd_qc_per_block;
274 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
282 mutex_unlock(&sdp->sd_quota_mutex);
290 mutex_unlock(&sdp->sd_quota_mutex);
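
Lines 263-264 locate a slot inside the quota-change file with simple integer arithmetic: each qc block holds qc_per_block entries, so the block index is slot / qc_per_block and the entry within it is slot % qc_per_block. A minimal sketch of that mapping, using an example qc_per_block value rather than the filesystem's real one:

#include <stdio.h>

int main(void)
{
	unsigned int qc_per_block = 62;   /* example only; depends on block size */
	unsigned int slot = 200;

	unsigned int block  = slot / qc_per_block;   /* which quota-change block */
	unsigned int offset = slot % qc_per_block;   /* entry within that block  */

	printf("slot %u -> block %u, offset %u\n", slot, block, offset);
	return 0;
}
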
296 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
298 mutex_lock(&sdp->sd_quota_mutex);
299 gfs2_assert(sdp, qd->qd_bh_count);
305 mutex_unlock(&sdp->sd_quota_mutex);
308 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
316 if (sdp->sd_vfs->s_flags & MS_RDONLY)
319 spin_lock(&sdp->sd_quota_spin);
321 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
324 qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
327 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
330 gfs2_assert_warn(sdp, qd->qd_count);
333 gfs2_assert_warn(sdp, qd->qd_slot_count);
343 spin_unlock(&sdp->sd_quota_spin);
346 gfs2_assert_warn(sdp, qd->qd_change_sync);
363 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
365 if (sdp->sd_vfs->s_flags & MS_RDONLY)
368 spin_lock(&sdp->sd_quota_spin);
372 spin_unlock(&sdp->sd_quota_spin);
376 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
379 gfs2_assert_warn(sdp, qd->qd_count);
382 gfs2_assert_warn(sdp, qd->qd_slot_count);
385 spin_unlock(&sdp->sd_quota_spin);
387 gfs2_assert_warn(sdp, qd->qd_change_sync);
408 static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
413 error = qd_get(sdp, user, id, create, qdp);
443 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
448 if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
449 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
455 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
491 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
495 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
526 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
527 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
531 mutex_lock(&sdp->sd_quota_mutex);
545 spin_lock(&sdp->sd_quota_spin);
547 spin_unlock(&sdp->sd_quota_spin);
550 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
561 mutex_unlock(&sdp->sd_quota_mutex);
638 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
639 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
691 error = gfs2_trans_begin(sdp,
700 error = gfs2_trans_begin(sdp,
722 gfs2_trans_end(sdp);
742 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
743 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
751 file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
809 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
817 sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
842 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
843 struct gfs2_tune *gt = &sdp->sd_tune;
851 spin_lock(&sdp->sd_quota_spin);
853 spin_unlock(&sdp->sd_quota_spin);
866 value *= gfs2_jindex_size(sdp) * num;
913 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
916 sdp->sd_fsname, type,
925 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
935 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
946 spin_lock(&sdp->sd_quota_spin);
948 spin_unlock(&sdp->sd_quota_spin);
957 gfs2_tune_get(sdp,
991 int gfs2_quota_sync(struct gfs2_sbd *sdp)
994 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
999 sdp->sd_quota_sync_gen++;
1009 error = qd_fish(sdp, qda + num_qd);
1022 sdp->sd_quota_sync_gen;
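
The gfs2_quota_sync fragments (lines 991-1022) suggest a batching pattern: bump a sync generation, collect up to gt_quota_simul_sync dirty entries via qd_fish, write them out, and stamp each with the new generation so the next pass skips it. The sketch below only mimics that flow in user space under invented names (qd_entry, fish_dirty, write_batch); it is not the GFS2 implementation.

#include <stdio.h>

struct qd_entry {
	int dirty;               /* has a pending change */
	unsigned long sync_gen;  /* generation it was last synced at */
};

static unsigned long quota_sync_gen;

/* Stand-in for qd_fish(): claim the next dirty entry not yet synced
 * at the current generation, or return NULL when there is none. */
static struct qd_entry *fish_dirty(struct qd_entry *tbl, int n)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].dirty && tbl[i].sync_gen < quota_sync_gen) {
			tbl[i].dirty = 0;       /* claim it for this batch */
			return &tbl[i];
		}
	}
	return NULL;
}

static void write_batch(struct qd_entry **qda, int num)
{
	for (int i = 0; i < num; i++)
		qda[i]->sync_gen = quota_sync_gen;  /* stamp the generation */
	printf("synced %d entr%s at generation %lu\n",
	       num, num == 1 ? "y" : "ies", quota_sync_gen);
}

int main(void)
{
	struct qd_entry tbl[5] = { {1, 0}, {0, 0}, {1, 0}, {1, 0}, {0, 0} };
	const int max_qd = 2;    /* plays the role of gt_quota_simul_sync */
	struct qd_entry *qda[2];
	int num_qd;

	quota_sync_gen++;
	do {
		num_qd = 0;
		while (num_qd < max_qd) {
			struct qd_entry *qd = fish_dirty(tbl, 5);
			if (!qd)
				break;
			qda[num_qd++] = qd;
		}
		if (num_qd)
			write_batch(qda, num_qd);
	} while (num_qd == max_qd);

	return 0;
}
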
1034 int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1040 error = qd_get(sdp, user, id, CREATE, &qd);
1053 int gfs2_quota_init(struct gfs2_sbd *sdp)
1055 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1056 unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
1064 ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
1068 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1069 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1073 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1075 if (!sdp->sd_quota_bitmap)
1078 for (x = 0; x < sdp->sd_quota_chunks; x++) {
1079 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
1080 if (!sdp->sd_quota_bitmap[x])
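
The gfs2_quota_init fragments above (lines 1056-1080) size and allocate the slot bitmap from the quota-change file: the file size gives a block count, each block holds qc_per_block slots, and the bitmap is split into page-sized chunks of 8 * PAGE_SIZE bits, allocated as an array of per-chunk pages. A hedged user-space sketch of the same arithmetic, with example numbers that are not the filesystem's real ones:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long di_size = 1UL << 20;  /* example: 1 MiB quota-change file */
	unsigned int bsize_shift = 12;      /* example: 4 KiB filesystem blocks */
	unsigned int qc_per_block = 62;     /* example value, fs-dependent      */

	unsigned int blocks = di_size >> bsize_shift;
	unsigned int quota_slots = blocks * qc_per_block;
	unsigned int quota_chunks = DIV_ROUND_UP(quota_slots, 8 * PAGE_SIZE);

	/* One pointer per chunk, each chunk a zeroed page of bitmap. */
	unsigned char **bitmap = calloc(quota_chunks, sizeof(*bitmap));
	if (!bitmap)
		return 1;
	for (unsigned int x = 0; x < quota_chunks; x++) {
		bitmap[x] = calloc(1, PAGE_SIZE);
		if (!bitmap[x])
			return 1;
	}

	printf("%u blocks -> %u slots -> %u bitmap chunk(s)\n",
	       blocks, quota_slots, quota_chunks);

	for (unsigned int x = 0; x < quota_chunks; x++)
		free(bitmap[x]);
	free(bitmap);
	return 0;
}
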
1098 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1103 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1114 error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1127 spin_lock(&sdp->sd_quota_spin);
1128 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1129 list_add(&qd->qd_list, &sdp->sd_quota_list);
1130 atomic_inc(&sdp->sd_quota_count);
1131 spin_unlock(&sdp->sd_quota_spin);
1142 fs_info(sdp, "found %u quota changes\n", found);
1147 gfs2_quota_cleanup(sdp);
1151 void gfs2_quota_scan(struct gfs2_sbd *sdp)
1156 spin_lock(&sdp->sd_quota_spin);
1157 list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
1160 gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
1162 gfs2_assert_warn(sdp,
1163 atomic_read(&sdp->sd_quota_count) > 0);
1164 atomic_dec(&sdp->sd_quota_count);
1167 spin_unlock(&sdp->sd_quota_spin);
1173 gfs2_assert_warn(sdp, !qd->qd_change);
1174 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1175 gfs2_assert_warn(sdp, !qd->qd_bh_count);
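
The gfs2_quota_scan fragments (lines 1151-1175) walk the cached quota entries and reclaim those that are idle and older than the gt_quota_cache_secs tunable. The sketch below only mirrors that age-based reclaim idea in user space; the struct layout, quota_scan() and the 300-second threshold are assumptions, not the kernel's.

#include <stdio.h>
#include <time.h>

struct qd {
	time_t last_touched;    /* when the entry was last used */
	int count;              /* references currently held    */
	int change;             /* pending quota change         */
	int reclaimed;
};

/* Release entries that are unreferenced, have no pending change,
 * and have been idle for longer than cache_secs. */
static void quota_scan(struct qd *tbl, int n, time_t now, int cache_secs)
{
	for (int i = 0; i < n; i++) {
		struct qd *qd = &tbl[i];

		if (qd->reclaimed || qd->count || qd->change)
			continue;                        /* still in use */
		if (now - qd->last_touched <= cache_secs)
			continue;                        /* not old enough */
		qd->reclaimed = 1;
		printf("reclaimed entry %d\n", i);
	}
}

int main(void)
{
	time_t now = time(NULL);
	struct qd tbl[] = {
		{ now - 600, 0, 0, 0 },   /* old and idle: reclaimed  */
		{ now - 600, 1, 0, 0 },   /* old but referenced: kept */
		{ now - 10,  0, 0, 0 },   /* recently used: kept      */
	};

	quota_scan(tbl, 3, now, 300);
	return 0;
}
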
1182 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1184 struct list_head *head = &sdp->sd_quota_list;
1188 spin_lock(&sdp->sd_quota_spin);
1195 spin_unlock(&sdp->sd_quota_spin);
1197 spin_lock(&sdp->sd_quota_spin);
1202 atomic_dec(&sdp->sd_quota_count);
1203 spin_unlock(&sdp->sd_quota_spin);
1206 gfs2_assert_warn(sdp, !qd->qd_change);
1207 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1209 gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1210 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1215 spin_lock(&sdp->sd_quota_spin);
1217 spin_unlock(&sdp->sd_quota_spin);
1219 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1221 if (sdp->sd_quota_bitmap) {
1222 for (x = 0; x < sdp->sd_quota_chunks; x++)
1223 kfree(sdp->sd_quota_bitmap[x]);
1224 kfree(sdp->sd_quota_bitmap);