// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"

#include "acl.h"
#include "xattr.h"

#include <linux/posix_acl.h>

/*
 * Human-readable names for the ACL_* entry tags, indexed by tag value,
 * used by bch2_acl_to_text().  Designated initializers are used, so any
 * index gaps between the listed tags are NULL.
 */
static const char * const acl_types[] = {
	[ACL_USER_OBJ]	= "user_obj",
	[ACL_USER]	= "user",
	[ACL_GROUP_OBJ]	= "group_obj",
	[ACL_GROUP]	= "group",
	[ACL_MASK]	= "mask",
	[ACL_OTHER]	= "other",
	NULL,
};
19
20void bch2_acl_to_text(struct printbuf *out, const void *value, size_t size)
21{
22	const void *p, *end = value + size;
23
24	if (!value ||
25	    size < sizeof(bch_acl_header) ||
26	    ((bch_acl_header *)value)->a_version != cpu_to_le32(BCH_ACL_VERSION))
27		return;
28
29	p = value + sizeof(bch_acl_header);
30	while (p < end) {
31		const bch_acl_entry *in = p;
32		unsigned tag = le16_to_cpu(in->e_tag);
33
34		prt_str(out, acl_types[tag]);
35
36		switch (tag) {
37		case ACL_USER_OBJ:
38		case ACL_GROUP_OBJ:
39		case ACL_MASK:
40		case ACL_OTHER:
41			p += sizeof(bch_acl_entry_short);
42			break;
43		case ACL_USER:
44			prt_printf(out, " uid %u", le32_to_cpu(in->e_id));
45			p += sizeof(bch_acl_entry);
46			break;
47		case ACL_GROUP:
48			prt_printf(out, " gid %u", le32_to_cpu(in->e_id));
49			p += sizeof(bch_acl_entry);
50			break;
51		}
52
53		prt_printf(out, " %o", le16_to_cpu(in->e_perm));
54
55		if (p != end)
56			prt_char(out, ' ');
57	}
58}

#ifdef CONFIG_BCACHEFS_POSIX_ACL

#include "fs.h"

#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/sched.h>
#include <linux/slab.h>

69static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
70{
71	return sizeof(bch_acl_header) +
72		sizeof(bch_acl_entry_short) * nr_short +
73		sizeof(bch_acl_entry) * nr_long;
74}
75
76static inline int acl_to_xattr_type(int type)
77{
78	switch (type) {
79	case ACL_TYPE_ACCESS:
80		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
81	case ACL_TYPE_DEFAULT:
82		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
83	default:
84		BUG();
85	}
86}
87
/*
 * Convert from filesystem to in-memory representation.
 *
 * Walks the on-disk buffer twice: a first pass validates each entry and
 * counts them, a second pass fills in the allocated posix_acl.  Returns
 * NULL for a missing or empty ACL, ERR_PTR(-EINVAL) for a malformed one.
 */
static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
					    const void *value, size_t size)
{
	const void *p, *end = value + size;
	struct posix_acl *acl;
	struct posix_acl_entry *out;
	unsigned count = 0;
	int ret;

	if (!value)
		return NULL;
	if (size < sizeof(bch_acl_header))
		goto invalid;
	if (((bch_acl_header *)value)->a_version !=
	    cpu_to_le32(BCH_ACL_VERSION))
		goto invalid;

	/* First pass: validate entry tags and count entries: */
	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *entry = p;

		/* Every entry is at least this big: */
		if (p + sizeof(bch_acl_entry_short) > end)
			goto invalid;

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
		case ACL_GROUP:
			p += sizeof(bch_acl_entry);
			break;
		default:
			goto invalid;
		}

		count++;
	}

	/*
	 * Full-sized entries were only bounds-checked against the short
	 * size above, so the final entry may have overhung the buffer:
	 */
	if (p > end)
		goto invalid;

	if (!count)
		return NULL;

	/*
	 * allocate_dropping_locks() may drop btree locks for the
	 * allocation; ret reports the resulting transaction error,
	 * independent of the returned pointer:
	 */
	acl = allocate_dropping_locks(trans, ret,
			posix_acl_alloc(count, _gfp));
	if (!acl)
		return ERR_PTR(-ENOMEM);
	if (ret) {
		kfree(acl);
		return ERR_PTR(ret);
	}

	/* Second pass: fill in the in-memory entries: */
	out = acl->a_entries;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *in = p;

		out->e_tag  = le16_to_cpu(in->e_tag);
		out->e_perm = le16_to_cpu(in->e_perm);

		switch (out->e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
			out->e_uid = make_kuid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			out->e_gid = make_kgid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		}

		out++;
	}

	BUG_ON(out != acl->a_entries + acl->a_count);

	return acl;
invalid:
	pr_err("invalid acl entry");
	return ERR_PTR(-EINVAL);
}
186
/*
 * Iterate @acl_e over every posix_acl_entry in @acl.  Arguments are
 * parenthesized so expression arguments expand safely.
 */
#define acl_for_each_entry(acl, acl_e)				\
	for ((acl_e) = (acl)->a_entries;			\
	     (acl_e) < (acl)->a_entries + (acl)->a_count;	\
	     (acl_e)++)
191
/*
 * Convert from in-memory to filesystem representation.
 *
 * Builds a complete xattr bkey (allocated from @trans) whose value is the
 * packed on-disk ACL.  Returns ERR_PTR(-EINVAL) for an unknown entry tag,
 * ERR_PTR(-E2BIG) if the result wouldn't fit in a bkey.
 */
static struct bkey_i_xattr *
bch2_acl_to_xattr(struct btree_trans *trans,
		  const struct posix_acl *acl,
		  int type)
{
	struct bkey_i_xattr *xattr;
	bch_acl_header *acl_header;
	const struct posix_acl_entry *acl_e;
	void *outptr;
	unsigned nr_short = 0, nr_long = 0, acl_len, u64s;

	/* First pass: count entries of each size, to size the allocation: */
	acl_for_each_entry(acl, acl_e) {
		switch (acl_e->e_tag) {
		case ACL_USER:
		case ACL_GROUP:
			nr_long++;
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			nr_short++;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	acl_len = bch2_acl_size(nr_short, nr_long);
	u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);

	/* The bkey u64s field can't represent anything bigger: */
	if (u64s > U8_MAX)
		return ERR_PTR(-E2BIG);

	xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
	if (IS_ERR(xattr))
		return xattr;

	bkey_xattr_init(&xattr->k_i);
	xattr->k.u64s		= u64s;
	xattr->v.x_type		= acl_to_xattr_type(type);
	xattr->v.x_name_len	= 0;	/* ACL xattrs are keyed by type alone */
	xattr->v.x_val_len	= cpu_to_le16(acl_len);

	acl_header = xattr_val(&xattr->v);
	acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);

	outptr = (void *) acl_header + sizeof(*acl_header);

	/* Second pass: pack the entries after the header: */
	acl_for_each_entry(acl, acl_e) {
		bch_acl_entry *entry = outptr;

		entry->e_tag = cpu_to_le16(acl_e->e_tag);
		entry->e_perm = cpu_to_le16(acl_e->e_perm);
		switch (acl_e->e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
				from_kuid(&init_user_ns, acl_e->e_uid));
			outptr += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
				from_kgid(&init_user_ns, acl_e->e_gid));
			outptr += sizeof(bch_acl_entry);
			break;

		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			outptr += sizeof(bch_acl_entry_short);
			break;
		}
	}

	BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);

	return xattr;
}
274
/*
 * VFS ->get_acl hook: look up an inode's ACL of the given type (access or
 * default) in its xattrs.
 *
 * Returns NULL if the inode has no such ACL, an ERR_PTR on error; on
 * success the result is also installed in the VFS ACL cache.
 */
struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
			       struct dentry *dentry, int type)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
	struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct posix_acl *acl = NULL;
	struct bkey_s_c k;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
			&hash, inode_inum(inode), &search, 0);
	if (ret)
		goto err;

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
				 le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
err:
	/* The whole lookup is redone from scratch on transaction restart: */
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	/* No xattr (ENOENT) just means the inode has no ACL: */
	if (ret)
		acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;

	if (!IS_ERR_OR_NULL(acl))
		set_cached_acl(&inode->v, type, acl);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return acl;
}
318
319int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
320		       struct bch_inode_unpacked *inode_u,
321		       struct posix_acl *acl, int type)
322{
323	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
324	int ret;
325
326	if (type == ACL_TYPE_DEFAULT &&
327	    !S_ISDIR(inode_u->bi_mode))
328		return acl ? -EACCES : 0;
329
330	if (acl) {
331		struct bkey_i_xattr *xattr =
332			bch2_acl_to_xattr(trans, acl, type);
333		if (IS_ERR(xattr))
334			return PTR_ERR(xattr);
335
336		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
337				    inum, &xattr->k_i, 0);
338	} else {
339		struct xattr_search_key search =
340			X_SEARCH(acl_to_xattr_type(type), "", 0);
341
342		ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
343				       inum, &search);
344	}
345
346	return bch2_err_matches(ret, ENOENT) ? 0 : ret;
347}
348
/*
 * VFS ->set_acl hook: set (or clear, if @_acl is NULL) an inode's ACL,
 * updating the inode's mode and ctime in the same transaction.
 */
int bch2_set_acl(struct mnt_idmap *idmap,
		 struct dentry *dentry,
		 struct posix_acl *_acl, int type)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl;
	umode_t mode;
	int ret;

	mutex_lock(&inode->ei_update_lock);
retry:
	bch2_trans_begin(trans);
	/*
	 * posix_acl_update_mode() below may modify acl, so reset it from
	 * @_acl on every trip around the retry loop:
	 */
	acl = _acl;

	/* Refuse writes to read-only subvolumes, then lock the inode key: */
	ret   = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
		bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_INTENT);
	if (ret)
		goto btree_err;

	mode = inode_u.bi_mode;

	if (type == ACL_TYPE_ACCESS) {
		/* The access ACL may fold permission bits back into mode: */
		ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
	if (ret)
		goto btree_err;

	inode_u.bi_ctime	= bch2_current_time(c);
	inode_u.bi_mode		= mode;

	ret =   bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err;

	/* Propagate the committed fields to the VFS inode: */
	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_CTIME|ATTR_MODE);

	set_cached_acl(&inode->v, type, acl);
err:
	mutex_unlock(&inode->ei_update_lock);
	bch2_trans_put(trans);

	return ret;
}
408
/*
 * Called on chmod: rewrite the inode's access ACL so its permission bits
 * match the new @mode.
 *
 * If the inode has an access ACL, the backing xattr is updated in @trans
 * and the modified ACL is returned via @new_acl (caller takes ownership).
 * Returns 0 if there is no access ACL, or an error code.
 */
int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
		   struct bch_inode_unpacked *inode,
		   umode_t mode,
		   struct posix_acl **new_acl)
{
	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
	struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
	struct btree_iter iter;
	struct bkey_s_c_xattr xattr;
	struct bkey_i_xattr *new;
	struct posix_acl *acl = NULL;
	struct bkey_s_c k;
	int ret;

	ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
			       &hash_info, inum, &search, BTREE_ITER_INTENT);
	if (ret)
		/* No access ACL is not an error: */
		return bch2_err_matches(ret, ENOENT) ? 0 : ret;

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	xattr = bkey_s_c_to_xattr(k);

	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
			le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
	if (IS_ERR_OR_NULL(acl))
		goto err;

	/* May drop btree locks for the allocation inside: */
	ret = allocate_dropping_locks_errcode(trans,
				__posix_acl_chmod(&acl, _gfp, mode));
	if (ret)
		goto err;

	new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto err;
	}

	/* Overwrite the existing xattr key in place: */
	new->k.p = iter.pos;
	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
	/* Ownership of acl passes to the caller; don't free it below: */
	*new_acl = acl;
	acl = NULL;
err:
	bch2_trans_iter_exit(trans, &iter);
	if (!IS_ERR_OR_NULL(acl))
		kfree(acl);
	return ret;
}

#endif /* CONFIG_BCACHEFS_POSIX_ACL */
