Lines matching refs: ci  (references to the struct ceph_inode_info pointer in the CephFS xattr code, fs/ceph/xattr.c; the leading number on each entry is the source line)

18 static int __remove_xattr(struct ceph_inode_info *ci,
36 ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
38 bool (*exists_cb)(struct ceph_inode_info *ci);
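
The two callback prototypes above (getxattr_cb / exists_cb) point at a table-driven virtual-xattr dispatch: each named attribute carries a formatter plus an optional existence check, and line 996 below shows how the pair is consulted. The following is a minimal, self-contained userspace sketch of that pattern; demo_inode, vxattr_entry and demo_getxattr are invented names, not the kernel's.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical stand-ins; the real table and inode live in fs/ceph/xattr.c. */
struct demo_inode {
	unsigned long long files;
	unsigned long long subdirs;
};

struct vxattr_entry {
	const char *name;
	/* formats the value into val (size bytes), returns the needed length */
	ssize_t (*getxattr_cb)(struct demo_inode *ci, char *val, size_t size);
	/* optional: the attribute is only reported when this returns true */
	bool (*exists_cb)(struct demo_inode *ci);
};

static ssize_t demo_entries_cb(struct demo_inode *ci, char *val, size_t size)
{
	return snprintf(val, size, "%llu", ci->files + ci->subdirs);
}

static const struct vxattr_entry demo_vxattrs[] = {
	{ "user.demo.dir.entries", demo_entries_cb, NULL },
	{ NULL, NULL, NULL },
};

static ssize_t demo_getxattr(struct demo_inode *ci, const char *name,
			     char *val, size_t size)
{
	const struct vxattr_entry *vx;

	for (vx = demo_vxattrs; vx->name; vx++) {
		if (strcmp(vx->name, name) != 0)
			continue;
		if (vx->exists_cb && !vx->exists_cb(ci))
			return -ENODATA;	/* in the table, but not currently set */
		return vx->getxattr_cb(ci, val, size);
	}
	return -ENODATA;	/* not a virtual xattr at all */
}
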
49 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
51 struct ceph_file_layout *fl = &ci->i_layout;
57 static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
60 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
64 s64 pool = ci->i_layout.pool_id;
71 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
73 doutc(cl, "%p\n", &ci->netfs.inode);
79 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
80 ci->i_layout.object_size);
85 ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
86 ci->i_layout.object_size, pool);
143 static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
146 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit);
149 static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
152 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count);
155 static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
158 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size);
161 static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
165 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
167 s64 pool = ci->i_layout.pool_id;
183 static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
187 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
200 static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
203 return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs);
206 static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
209 return ceph_fmt_xattr(val, size, "%lld", ci->i_files);
212 static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
215 return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs);
218 static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
222 ci->i_rfiles + ci->i_rsubdirs);
225 static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
228 return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles);
231 static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
234 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs);
237 static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val,
240 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps);
243 static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
246 return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes);
249 static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
252 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
253 ci->i_rctime.tv_nsec);
257 static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
259 return ci->i_dir_pin != -ENODATA;
262 static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
265 return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin);
269 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
272 spin_lock(&ci->i_ceph_lock);
273 if ((ci->i_max_files || ci->i_max_bytes) &&
274 ci->i_vino.snap == CEPH_NOSNAP &&
275 ci->i_snap_realm &&
276 ci->i_snap_realm->ino == ci->i_vino.ino)
278 spin_unlock(&ci->i_ceph_lock);
282 static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
286 ci->i_max_bytes, ci->i_max_files);
289 static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
292 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes);
295 static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
298 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files);
302 static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
304 return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0);
307 static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
310 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
311 ci->i_snap_btime.tv_nsec);
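
Nearly all of the simple callbacks above hand their value off to ceph_fmt_xattr(); its body does not appear in this listing, so the sketch below only illustrates the getxattr(2) contract such a helper has to respect: with size == 0 the caller is probing for the length, otherwise the value is formatted into the buffer and a too-small buffer is reported as -ERANGE. The fmt_xattr() name and the details are assumptions for illustration, not the kernel implementation.

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Illustrative only: format a value, returning the length it needs.
 * size == 0 is a pure length query, as with getxattr(2).
 */
static ssize_t fmt_xattr(char *val, size_t size, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(size ? val : NULL, size, fmt, args);
	va_end(args);

	if (len < 0)
		return -EIO;	/* formatting failure, not expected */
	if (size && (size_t)len >= size)
		return -ERANGE;	/* no room for the value plus the NUL this sketch writes */
	return len;		/* length of the value, excluding the NUL */
}
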
314 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
317 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
322 static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
325 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
331 static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val,
336 spin_lock(&ci->i_ceph_lock);
337 issued = __ceph_caps_issued(ci, NULL);
338 spin_unlock(&ci->i_ceph_lock);
344 static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
349 spin_lock(&ci->i_ceph_lock);
351 ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1);
352 spin_unlock(&ci->i_ceph_lock);
357 static bool ceph_vxattrcb_fscrypt_auth_exists(struct ceph_inode_info *ci)
359 return ci->fscrypt_auth_len;
362 static ssize_t ceph_vxattrcb_fscrypt_auth(struct ceph_inode_info *ci,
366 if (size < ci->fscrypt_auth_len)
368 memcpy(val, ci->fscrypt_auth, ci->fscrypt_auth_len);
370 return ci->fscrypt_auth_len;
568 static int __set_xattr(struct ceph_inode_info *ci,
574 struct inode *inode = &ci->netfs.inode;
582 p = &ci->i_xattrs.index.rb_node;
617 __remove_xattr(ci, xattr);
631 ci->i_xattrs.count++;
632 doutc(cl, "count=%d\n", ci->i_xattrs.count);
643 ci->i_xattrs.names_size -= xattr->name_len;
644 ci->i_xattrs.vals_size -= xattr->val_len;
646 ci->i_xattrs.names_size += name_len;
647 ci->i_xattrs.vals_size += val_len;
659 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
671 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
674 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
681 p = &ci->i_xattrs.index.rb_node;
718 static int __remove_xattr(struct ceph_inode_info *ci,
724 rb_erase(&xattr->node, &ci->i_xattrs.index);
731 ci->i_xattrs.names_size -= xattr->name_len;
732 ci->i_xattrs.vals_size -= xattr->val_len;
733 ci->i_xattrs.count--;
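
__set_xattr(), __get_xattr() and __remove_xattr() above keep the in-memory xattrs in a name-keyed rbtree (ci->i_xattrs.index) alongside running totals (count, names_size, vals_size). The lookup below is a condensed, self-contained sketch of the same idea, using an ordinary unbalanced binary search tree instead of the kernel's struct rb_node; removal would additionally subtract name_len/val_len from the totals and decrement count, exactly as the lines above show.

#include <string.h>

/* Simplified stand-in for struct ceph_inode_xattr (no rebalancing). */
struct xattr_node {
	char *name;
	char *val;
	size_t name_len, val_len;
	struct xattr_node *left, *right;
};

struct xattr_index {
	struct xattr_node *root;
	int count;		/* number of xattrs */
	size_t names_size;	/* sum of all name lengths */
	size_t vals_size;	/* sum of all value lengths */
};

/* Descend left/right by strcmp(), how any name-keyed tree is searched. */
static struct xattr_node *xattr_lookup(struct xattr_index *xi, const char *name)
{
	struct xattr_node *n = xi->root;

	while (n) {
		int cmp = strcmp(name, n->name);

		if (cmp < 0)
			n = n->left;
		else if (cmp > 0)
			n = n->right;
		else
			return n;
	}
	return NULL;
}
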
739 static char *__copy_xattr_names(struct ceph_inode_info *ci,
742 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
746 p = rb_first(&ci->i_xattrs.index);
747 doutc(cl, "count=%d\n", ci->i_xattrs.count);
755 xattr->name_len, ci->i_xattrs.names_size);
764 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
766 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
770 p = rb_first(&ci->i_xattrs.index);
779 rb_erase(tmp, &ci->i_xattrs.index);
784 ci->i_xattrs.names_size = 0;
785 ci->i_xattrs.vals_size = 0;
786 ci->i_xattrs.index_version = 0;
787 ci->i_xattrs.count = 0;
788 ci->i_xattrs.index = RB_ROOT;
792 __releases(ci->i_ceph_lock)
793 __acquires(ci->i_ceph_lock)
801 struct ceph_inode_info *ci = ceph_inode(inode);
808 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
810 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
813 __ceph_destroy_xattrs(ci);
817 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
818 p = ci->i_xattrs.blob->vec.iov_base;
819 end = p + ci->i_xattrs.blob->vec.iov_len;
821 xattr_version = ci->i_xattrs.version;
822 spin_unlock(&ci->i_ceph_lock);
837 spin_lock(&ci->i_ceph_lock);
838 if (ci->i_xattrs.version != xattr_version) {
856 err = __set_xattr(ci, name, namelen, val, len,
864 ci->i_xattrs.index_version = ci->i_xattrs.version;
865 ci->i_xattrs.dirty = false;
869 spin_lock(&ci->i_ceph_lock);
876 ci->i_xattrs.names_size = 0;
880 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
883 struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
889 int size = 4 + ci->i_xattrs.count*(4 + 4) +
890 ci->i_xattrs.names_size +
891 ci->i_xattrs.vals_size;
892 doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
893 ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
907 struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
909 struct inode *inode = &ci->netfs.inode;
917 if (ci->i_xattrs.dirty) {
918 int need = __get_required_blob_size(ci, 0, 0);
920 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
922 p = rb_first(&ci->i_xattrs.index);
923 dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
925 ceph_encode_32(&dest, ci->i_xattrs.count);
940 ci->i_xattrs.prealloc_blob->vec.iov_len =
941 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
943 if (ci->i_xattrs.blob)
944 old_blob = ci->i_xattrs.blob;
945 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
946 ci->i_xattrs.prealloc_blob = NULL;
947 ci->i_xattrs.dirty = false;
948 ci->i_xattrs.version++;
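
Together, __get_required_blob_size() and __ceph_build_xattrs_blob() pin down the blob layout: a 32-bit entry count followed by length-prefixed names and values, which is where the 4 + count*(4 + 4) + names_size + vals_size formula above comes from. Below is a userspace encoder sketch of that layout; the little-endian encoding matches Ceph's wire convention, but the per-entry ordering is inferred from the size formula and the helper names (put_le32, demo_xattr, encode_xattr_blob) are invented for illustration.

#include <stdint.h>
#include <string.h>

struct demo_xattr {
	const char *name;
	const void *val;
	uint32_t val_len;
};

static void put_le32(unsigned char **p, uint32_t v)
{
	(*p)[0] = v & 0xff;
	(*p)[1] = (v >> 8) & 0xff;
	(*p)[2] = (v >> 16) & 0xff;
	(*p)[3] = (v >> 24) & 0xff;
	*p += 4;
}

/*
 * Encode: u32 count, then per entry { u32 name_len, name, u32 val_len, val }.
 * The destination must hold 4 + count*(4+4) + total name bytes + total value
 * bytes, matching the __get_required_blob_size() formula in the listing.
 */
static size_t encode_xattr_blob(unsigned char *dest,
				const struct demo_xattr *xs, uint32_t count)
{
	unsigned char *p = dest;
	uint32_t i;

	put_le32(&p, count);
	for (i = 0; i < count; i++) {
		uint32_t name_len = (uint32_t)strlen(xs[i].name);

		put_le32(&p, name_len);
		memcpy(p, xs[i].name, name_len);
		p += name_len;
		put_le32(&p, xs[i].val_len);
		memcpy(p, xs[i].val, xs[i].val_len);
		p += xs[i].val_len;
	}
	return (size_t)(p - dest);	/* the kernel stores this in blob->vec.iov_len */
}
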
975 struct ceph_inode_info *ci = ceph_inode(inode);
996 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
997 err = vxattr->getxattr_cb(ci, value, size);
1012 spin_lock(&ci->i_ceph_lock);
1014 ceph_vinop(inode), name, ci->i_xattrs.version,
1015 ci->i_xattrs.index_version);
1017 if (ci->i_xattrs.version == 0 ||
1019 __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
1020 spin_unlock(&ci->i_ceph_lock);
1034 spin_lock(&ci->i_ceph_lock);
1042 xattr = __get_xattr(ci, name);
1059 ci->i_ceph_flags |= CEPH_I_SEC_INITED;
1061 spin_unlock(&ci->i_ceph_lock);
1069 struct ceph_inode_info *ci = ceph_inode(inode);
1074 spin_lock(&ci->i_ceph_lock);
1076 ceph_vinop(inode), ci->i_xattrs.version,
1077 ci->i_xattrs.index_version);
1079 if (ci->i_xattrs.version == 0 ||
1080 !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
1081 spin_unlock(&ci->i_ceph_lock);
1085 spin_lock(&ci->i_ceph_lock);
1093 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
1099 names = __copy_xattr_names(ci, names);
1104 spin_unlock(&ci->i_ceph_lock);
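
The listxattr path above sizes its buffer as ci->i_xattrs.names_size + ci->i_xattrs.count, i.e. every stored name plus one NUL byte each, which is exactly the concatenated NUL-terminated list that listxattr(2) returns; __copy_xattr_names() walks the rbtree to fill it. A small sketch of packing names that way:

#include <string.h>

/*
 * Pack name strings back-to-back, each followed by '\0', the layout
 * listxattr(2) hands back. Needed capacity = sum of name lengths plus
 * one byte per name, i.e. names_size + count in the listing above.
 */
static size_t copy_xattr_names(char *dst, const char *const *names, int count)
{
	char *p = dst;
	int i;

	for (i = 0; i < count; i++) {
		size_t len = strlen(names[i]);

		memcpy(p, names[i], len + 1);	/* include the trailing NUL */
		p += len + 1;
	}
	return (size_t)(p - dst);
}
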
1113 struct ceph_inode_info *ci = ceph_inode(inode);
1166 doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
1169 doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
1182 struct ceph_inode_info *ci = ceph_inode(inode);
1233 spin_lock(&ci->i_ceph_lock);
1235 issued = __ceph_caps_issued(ci, NULL);
1236 required_blob_size = __get_required_blob_size(ci, name_len, val_len);
1237 if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
1240 ci->i_xattrs.version, required_blob_size,
1245 if (!lock_snap_rwsem && !ci->i_head_snapc) {
1248 spin_unlock(&ci->i_ceph_lock);
1250 spin_lock(&ci->i_ceph_lock);
1259 if (!ci->i_xattrs.prealloc_blob ||
1260 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
1263 spin_unlock(&ci->i_ceph_lock);
1270 spin_lock(&ci->i_ceph_lock);
1272 if (ci->i_xattrs.prealloc_blob)
1273 old_blob = ci->i_xattrs.prealloc_blob;
1274 ci->i_xattrs.prealloc_blob = blob;
1278 err = __set_xattr(ci, newname, name_len, newval, val_len,
1282 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
1284 ci->i_xattrs.dirty = true;
1288 spin_unlock(&ci->i_ceph_lock);
1298 spin_unlock(&ci->i_ceph_lock);
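
The __ceph_setxattr() lines above show the preallocation dance around i_ceph_lock: the required blob size is computed under the spinlock, but the buffer must be allocated with the lock dropped, and after relocking the size has to be re-checked because other updates may have slipped in. Below is a userspace sketch of that retry pattern, with a pthread mutex standing in for the spinlock; xattr_state, reserve_blob and pending_bytes are hypothetical names, not kernel symbols.

#include <pthread.h>
#include <stdlib.h>

struct blob {
	size_t alloc_len;
	/* value bytes would follow in a real buffer */
};

struct xattr_state {
	pthread_mutex_t lock;	/* stand-in for ci->i_ceph_lock */
	struct blob *prealloc_blob;
	size_t pending_bytes;	/* whatever drives the size; only read under the lock */
};

/* Stand-in for __get_required_blob_size(); call with the lock held. */
static size_t required_blob_size(const struct xattr_state *st)
{
	return st->pending_bytes;
}

static int reserve_blob(struct xattr_state *st)
{
	pthread_mutex_lock(&st->lock);
	for (;;) {
		size_t need = required_blob_size(st);
		struct blob *nb;

		if (st->prealloc_blob && st->prealloc_blob->alloc_len >= need)
			break;			/* current preallocation is big enough */

		/* Can't allocate while holding the lock: drop, allocate, retake. */
		pthread_mutex_unlock(&st->lock);
		nb = malloc(sizeof(*nb));
		if (!nb)
			return -1;
		nb->alloc_len = need;
		pthread_mutex_lock(&st->lock);

		/* Swap in the new buffer, then loop to re-check the size. */
		free(st->prealloc_blob);
		st->prealloc_blob = nb;
	}
	pthread_mutex_unlock(&st->lock);
	return 0;
}
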
1313 spin_lock(&ci->i_ceph_lock);
1314 if ((ci->i_max_files || ci->i_max_bytes) &&
1315 !(ci->i_snap_realm &&
1316 ci->i_snap_realm->ino == ci->i_vino.ino))
1318 spin_unlock(&ci->i_ceph_lock);
1363 struct ceph_inode_info *ci;
1367 ci = ceph_inode(in);
1368 spin_lock(&ci->i_ceph_lock);
1369 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
1370 !(ci->i_xattrs.version > 0 &&
1371 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
1372 spin_unlock(&ci->i_ceph_lock);