Lines Matching refs:objcg

265 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
270 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
277 * objcg->nr_charged_bytes can't have an arbitrary byte value.
281 * 1) CPU0: objcg == stock->cached_objcg
286 * objcg->nr_charged_bytes = PAGE_SIZE - 92
290 * 92 bytes are added to objcg->nr_charged_bytes
295 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
300 obj_cgroup_uncharge_pages(objcg, nr_pages);
303 list_del(&objcg->list);
307 kfree_rcu(objcg, rcu);
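
The comment excerpted above (source lines 277-290) argues that by the time obj_cgroup_release() runs, nr_charged_bytes always sums to a whole number of pages. A minimal userspace C model of that arithmetic, with a hypothetical nr_charged_bytes counter and a fixed 4 KiB page (names and values are illustrative, not the kernel's):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    static atomic_ulong nr_charged_bytes;   /* models objcg->nr_charged_bytes */

    int main(void)
    {
        /* CPU1: a 92-byte allocation forces a full-page charge; when the
         * per-CPU stock is flushed, the unused remainder lands here. */
        atomic_fetch_add(&nr_charged_bytes, PAGE_SIZE - 92);

        /* CPU0: the object is freed and its 92 bytes are flushed back. */
        atomic_fetch_add(&nr_charged_bytes, 92);

        /* Release path: the total is page-aligned, so whole pages can be
         * uncharged, mirroring obj_cgroup_uncharge_pages() above. */
        unsigned long nr_bytes = atomic_load(&nr_charged_bytes);
        assert((nr_bytes & (PAGE_SIZE - 1)) == 0);
        printf("uncharging %lu page(s)\n", nr_bytes >> PAGE_SHIFT);  /* 1 */
        return 0;
    }
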
312 struct obj_cgroup *objcg;
315 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
316 if (!objcg)
319 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
322 kfree(objcg);
325 INIT_LIST_HEAD(&objcg->list);
326 return objcg;
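
obj_cgroup_alloc() pairs a kzalloc() with a percpu_ref whose release callback (obj_cgroup_release, above) frees the object once the last reference is dropped. A sketch of the same pattern with a plain atomic counter standing in for the percpu_ref; all my_* names are invented for illustration:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct my_objcg {
        atomic_int refcnt;               /* stands in for the percpu_ref */
    };

    static void my_objcg_release(struct my_objcg *o)
    {
        /* In the kernel this is where leftover bytes are flushed
         * (see obj_cgroup_release above) before the object is freed. */
        puts("releasing objcg");
        free(o);
    }

    static struct my_objcg *my_objcg_alloc(void)
    {
        struct my_objcg *o = calloc(1, sizeof(*o));
        if (!o)
            return NULL;
        atomic_init(&o->refcnt, 1);      /* like percpu_ref_init(..., 0, ...) */
        return o;
    }

    static void my_objcg_put(struct my_objcg *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            my_objcg_release(o);         /* last reference gone */
    }

    int main(void)
    {
        struct my_objcg *o = my_objcg_alloc();
        if (o)
            my_objcg_put(o);
        return 0;
    }
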
332 struct obj_cgroup *objcg, *iter;
334 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
338 /* 1) Ready to reparent active objcg. */
339 list_add(&objcg->list, &memcg->objcg_list);
340 /* 2) Reparent active objcg and already reparented objcgs to parent. */
348 percpu_ref_kill(&objcg->refcnt);
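
memcg_reparent_objcgs() detaches the active objcg (step 1), then moves it together with every previously reparented objcg onto the parent, repointing each one's memcg (step 2), and finally kills the percpu ref. A simplified singly linked userspace model of those pointer moves (illustrative types; RCU and locking omitted):

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative userspace types; not the kernel's. */
    struct my_memcg;
    struct my_objcg { struct my_objcg *next; struct my_memcg *memcg; };
    struct my_memcg {
        struct my_objcg *objcg;        /* active objcg (an RCU pointer in the kernel) */
        struct my_objcg *reparented;   /* models memcg->objcg_list */
        struct my_memcg *parent;
    };

    static void my_reparent_objcgs(struct my_memcg *memcg)
    {
        struct my_memcg *parent = memcg->parent;
        struct my_objcg *objcg = memcg->objcg, *it, *n;

        memcg->objcg = NULL;               /* rcu_replace_pointer(..., NULL, true) */
        objcg->next = memcg->reparented;   /* 1) queue the active objcg */
        memcg->reparented = objcg;

        for (it = memcg->reparented; it; it = n) {  /* 2) move all to the parent */
            n = it->next;
            it->memcg = parent;
            it->next = parent->reparented;
            parent->reparented = it;
        }
        memcg->reparented = NULL;
        /* the kernel then drops the base reference via percpu_ref_kill() */
    }

    int main(void)
    {
        struct my_objcg o = { 0 };
        struct my_memcg root = { 0 };
        struct my_memcg child = { .objcg = &o, .parent = &root };

        o.memcg = &child;
        my_reparent_objcgs(&child);
        assert(o.memcg == &root && root.reparented == &o && !child.objcg);
        return 0;
    }
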
2982 * The allocated objcg pointers array is not accounted directly.
2993 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
3001 memcg = obj_cgroup_memcg(objcg);
3032 * objcg vector should be reused.
3126 struct obj_cgroup *objcg = NULL;
3129 objcg = rcu_dereference(memcg->objcg);
3130 if (likely(objcg && obj_cgroup_tryget(objcg)))
3132 objcg = NULL;
3134 return objcg;
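
__get_obj_cgroup_from_memcg() dereferences memcg->objcg under rcu_read_lock() and only keeps the pointer if obj_cgroup_tryget() succeeds; a failed tryget means the refcount already hit zero, so NULL is returned instead. A userspace stand-in for the tryget half (increment-unless-zero), with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct my_objcg { atomic_int refcnt; };

    /* Take a reference only if the count has not already dropped to
     * zero. Under rcu_read_lock() the object cannot be freed while we
     * attempt this. */
    static bool my_objcg_tryget(struct my_objcg *o)
    {
        int c = atomic_load(&o->refcnt);

        while (c > 0)
            if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
                return true;
        return false;           /* release already ran or is imminent */
    }

    int main(void)
    {
        struct my_objcg live = { .refcnt = 1 }, dead = { .refcnt = 0 };

        return (my_objcg_tryget(&live) && !my_objcg_tryget(&dead)) ? 0 : 1;
    }
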
3140 struct obj_cgroup *old, *objcg = NULL;
3144 old = xchg(&current->objcg, NULL);
3154 /* If new objcg is NULL, no reason for the second atomic update. */
3159 * Release the objcg pointer from the previous iteration,
3162 if (unlikely(objcg)) {
3163 obj_cgroup_put(objcg);
3164 objcg = NULL;
3168 * Obtain the new objcg pointer. The current task can be
3171 * and try get a reference to objcg under a rcu read lock.
3176 objcg = __get_obj_cgroup_from_memcg(memcg);
3180 * Try set up a new objcg pointer atomically. If it
3184 } while (!try_cmpxchg(&current->objcg, &old, objcg));
3186 return objcg;
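
current_objcg_update() (source lines 3140-3186) empties current->objcg with xchg(), which also clears the update flag, drops any stale references, takes a fresh one under RCU, and publishes it with try_cmpxchg(); if the flag was re-set concurrently, the compare fails and the whole operation repeats. A simplified C11 model with a pointer-sized atomic slot (all my_* helpers are hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    #define UPDATE_FLAG 0x1UL            /* models CURRENT_OBJCG_UPDATE_FLAG */

    _Atomic uintptr_t task_objcg;        /* models current->objcg */

    static void my_objcg_put(uintptr_t o)
    {
        (void)o;                         /* drop a reference; no-op here */
    }

    static uintptr_t my_objcg_lookup(void)
    {
        /* Hypothetical stand-in for taking a fresh reference on the
         * memcg's objcg under rcu_read_lock(). */
        static int dummy;
        return (uintptr_t)&dummy;
    }

    uintptr_t my_objcg_update(void)
    {
        uintptr_t old, objcg = 0;

        do {
            /* Atomically empty the slot; this clears the flag too, so
             * later flag-setters make the cmpxchg below fail. */
            old = atomic_exchange(&task_objcg, 0);
            if (old & ~UPDATE_FLAG)
                my_objcg_put(old & ~UPDATE_FLAG);
            old = 0;

            if (objcg)                   /* reference from a failed iteration */
                my_objcg_put(objcg);

            objcg = my_objcg_lookup();

            /* Publish. If UPDATE_FLAG was set concurrently, the slot is
             * no longer 0 and the whole operation repeats. */
        } while (!atomic_compare_exchange_strong(&task_objcg, &old, objcg));

        return objcg;
    }

    int main(void)
    {
        return my_objcg_update() ? 0 : 1;
    }
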
3192 struct obj_cgroup *objcg;
3199 objcg = READ_ONCE(current->objcg);
3200 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3201 objcg = current_objcg_update();
3204 * to use the objcg by the current task.
3206 return objcg;
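
The fast path in current_obj_cgroup() (source lines 3192-3206) just reads the slot and falls back to the slow path when the flag bit is set; the returned pointer is borrowed, since the task itself pins the objcg. A continuation of the sketch above (the earlier definitions are assumed visible when the two fragments are linked together):

    #include <stdatomic.h>
    #include <stdint.h>

    #define UPDATE_FLAG 0x1UL

    extern _Atomic uintptr_t task_objcg;     /* from the sketch above */
    extern uintptr_t my_objcg_update(void);  /* slow path from the sketch above */

    uintptr_t my_current_objcg(void)
    {
        uintptr_t objcg = atomic_load_explicit(&task_objcg,
                                               memory_order_relaxed);

        if (objcg & UPDATE_FLAG)             /* stale: take the slow path */
            objcg = my_objcg_update();
        /* Borrowed pointer: the task itself pins the objcg, so the fast
         * path takes no reference. */
        return objcg;
    }
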
3216 objcg = NULL;
3220 * and is pinning the corresponding objcg, so objcg can't go
3224 objcg = rcu_dereference_check(memcg->objcg, 1);
3225 if (likely(objcg))
3229 return objcg;
3234 struct obj_cgroup *objcg;
3240 objcg = __folio_objcg(folio);
3241 obj_cgroup_get(objcg);
3248 objcg = __get_obj_cgroup_from_memcg(memcg);
3250 objcg = NULL;
3253 return objcg;
3269 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3270 * @objcg: object cgroup to uncharge
3273 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3278 memcg = get_mem_cgroup_from_objcg(objcg);
3287 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3288 * @objcg: object cgroup to charge
3294 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3300 memcg = get_mem_cgroup_from_objcg(objcg);
3323 struct obj_cgroup *objcg;
3326 objcg = current_obj_cgroup();
3327 if (objcg) {
3328 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3330 obj_cgroup_get(objcg);
3331 page->memcg_data = (unsigned long)objcg |
3347 struct obj_cgroup *objcg;
3353 objcg = __folio_objcg(folio);
3354 obj_cgroup_uncharge_pages(objcg, nr_pages);
3356 obj_cgroup_put(objcg);
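
__memcg_kmem_charge_page() stores the objcg pointer and a type tag in the single page->memcg_data word (line 3331 above); the uncharge path masks the tag back off to recover the pointer. A minimal tagged-pointer model, where MY_DATA_KMEM stands in for the kernel's MEMCG_DATA_KMEM bit and the types are illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define MY_DATA_KMEM 0x1UL   /* models the MEMCG_DATA_KMEM type bit */

    struct my_objcg { int dummy; };
    struct my_page { uintptr_t memcg_data; };   /* models page->memcg_data */

    /* Pack the objcg pointer and the "this is a kmem page" tag into one
     * word; pointer alignment keeps the low bit free. */
    static void my_set_kmem(struct my_page *p, struct my_objcg *o)
    {
        p->memcg_data = (uintptr_t)o | MY_DATA_KMEM;
    }

    static struct my_objcg *my_page_objcg(const struct my_page *p)
    {
        assert(p->memcg_data & MY_DATA_KMEM);      /* must be a kmem page */
        return (struct my_objcg *)(p->memcg_data & ~MY_DATA_KMEM);
    }

    int main(void)
    {
        struct my_objcg o;
        struct my_page pg = { 0 };

        my_set_kmem(&pg, &o);
        assert(my_page_objcg(&pg) == &o);
        return 0;
    }
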
3359 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3375 if (READ_ONCE(stock->cached_objcg) != objcg) {
3377 obj_cgroup_get(objcg);
3378 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3379 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3380 WRITE_ONCE(stock->cached_objcg, objcg);
3387 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3392 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3418 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3425 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3434 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3510 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3513 if (objcg) {
3514 memcg = obj_cgroup_memcg(objcg);
3522 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3533 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3535 obj_cgroup_get(objcg);
3536 WRITE_ONCE(stock->cached_objcg, objcg);
3537 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3538 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3539 allow_uncharge = true; /* Allow uncharge when objcg changes */
3553 obj_cgroup_uncharge_pages(objcg, nr_pages);
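
consume_obj_stock() and refill_obj_stock() implement a per-CPU byte cache: the fast path serves sub-page charges from a pre-charged remainder without touching shared atomics, and a switch of the cached objcg pulls in whatever was flushed to nr_charged_bytes earlier. A thread-local userspace sketch of the two halves (flushing of the old objcg's stock is omitted; names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    struct my_objcg { atomic_ulong nr_charged_bytes; };

    /* One stock per CPU in the kernel; one per thread here. */
    static _Thread_local struct {
        struct my_objcg *cached;      /* models stock->cached_objcg */
        unsigned long nr_bytes;
    } stock;

    /* Fast path: serve the request from the cached pre-charge. */
    static bool my_consume_stock(struct my_objcg *o, size_t nr_bytes)
    {
        if (stock.cached == o && stock.nr_bytes >= nr_bytes) {
            stock.nr_bytes -= nr_bytes;
            return true;
        }
        return false;
    }

    /* Park leftover bytes in the stock; a switch of the cached objcg
     * pulls in bytes previously flushed to the shared counter. */
    static void my_refill_stock(struct my_objcg *o, size_t nr_bytes)
    {
        if (stock.cached != o) {
            stock.cached = o;
            stock.nr_bytes = atomic_exchange(&o->nr_charged_bytes, 0);
        }
        stock.nr_bytes += nr_bytes;
    }

    int main(void)
    {
        struct my_objcg o = { .nr_charged_bytes = 0 };

        my_refill_stock(&o, PAGE_SIZE);       /* pre-charge one page */
        return my_consume_stock(&o, 92) ? 0 : 1;
    }
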
3556 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3561 if (consume_obj_stock(objcg, size))
3565 * In theory, objcg->nr_charged_bytes can have enough
3567 * flushing objcg->nr_charged_bytes requires two atomic
3568 * operations, and objcg->nr_charged_bytes can't be big.
3569 * The shared objcg->nr_charged_bytes can also become a
3573 * objcg->nr_charged_bytes later on when objcg changes.
3593 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3595 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3600 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3602 refill_obj_stock(objcg, size, true);
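
When the stock cannot serve a request, obj_cgroup_charge() rounds the size up to whole pages, charges those, and parks the unused remainder back in the stock via refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false) (line 3595 above). The arithmetic for a 92-byte charge, worked in runnable form with an assumed 4 KiB page:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    int main(void)
    {
        size_t size = 92;                           /* object to charge */
        size_t nr_pages = size >> PAGE_SHIFT;       /* 0 full pages */
        size_t nr_bytes = size & (PAGE_SIZE - 1);   /* 92 leftover bytes */

        if (nr_bytes)
            nr_pages += 1;                          /* charge a whole page */

        /* The unused remainder goes back into the per-CPU stock, which
         * is what refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false)
         * does in the kernel. */
        printf("charge %zu page(s), stock %zu bytes\n",
               nr_pages, PAGE_SIZE - nr_bytes);     /* 1 page, 4004 bytes */
        assert(nr_pages * PAGE_SIZE == size + (PAGE_SIZE - nr_bytes));
        return 0;
    }
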
3949 struct obj_cgroup *objcg;
3957 objcg = obj_cgroup_alloc();
3958 if (!objcg)
3961 objcg->memcg = memcg;
3962 rcu_assign_pointer(memcg->objcg, objcg);
3963 obj_cgroup_get(objcg);
3964 memcg->orig_objcg = objcg;
6609 * Set the update flag to cause task->objcg to be initialized lazily
6614 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6619 struct obj_cgroup *objcg = task->objcg;
6621 objcg = (struct obj_cgroup *)
6622 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6623 if (objcg)
6624 obj_cgroup_put(objcg);
6632 task->objcg = NULL;
6666 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
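
On fork and on cgroup migration the kernel does not touch the objcg pointer itself; it only sets the low bit of task->objcg (CURRENT_OBJCG_UPDATE_BIT, line 6666 above), and the task refreshes the pointer lazily on its next allocation via current_objcg_update(). The marking step, modeled with an atomic OR on a pointer-sized slot:

    #include <stdatomic.h>
    #include <stdint.h>

    #define UPDATE_FLAG 0x1UL       /* models CURRENT_OBJCG_UPDATE_FLAG */

    /* Models set_bit(CURRENT_OBJCG_UPDATE_BIT, ...): only the low bit of
     * the task's objcg word is flipped; the pointer itself is refreshed
     * lazily by the task on its next allocation. */
    static void my_mark_objcg_stale(_Atomic uintptr_t *task_objcg)
    {
        atomic_fetch_or(task_objcg, UPDATE_FLAG);
    }

    int main(void)
    {
        _Atomic uintptr_t slot = 0;

        my_mark_objcg_stale(&slot);
        return (atomic_load(&slot) & UPDATE_FLAG) ? 0 : 1;
    }
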
7448 struct obj_cgroup *objcg;
7454 * folio memcg or objcg at this point, we have fully
7458 objcg = __folio_objcg(folio);
7463 memcg = get_mem_cgroup_from_objcg(objcg);
7490 obj_cgroup_put(objcg);
8124 * @objcg: the object cgroup
8134 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8142 original_memcg = get_mem_cgroup_from_objcg(objcg);
8172 * @objcg: the object cgroup
8178 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8188 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8192 memcg = obj_cgroup_memcg(objcg);
8200 * @objcg: the object cgroup
8205 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8212 obj_cgroup_uncharge(objcg, size);
8215 memcg = obj_cgroup_memcg(objcg);
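
The zswap hooks reuse the same objcg machinery: obj_cgroup_may_zswap() walks up the memcg hierarchy checking each level against its zswap limit, and the charge/uncharge helpers account the compressed size after calling obj_cgroup_charge()/obj_cgroup_uncharge(). A loose userspace model of that flow (limits, field names, and the hierarchy walk are simplified; all my_* names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct my_memcg {
        struct my_memcg *parent;
        atomic_ulong zswap_b;       /* compressed bytes accounted so far */
        unsigned long zswap_max;    /* per-memcg zswap limit */
    };

    /* Loosely models obj_cgroup_may_zswap(): every level of the
     * hierarchy must be under its limit for the store to proceed. */
    static bool my_zswap_may_store(struct my_memcg *m)
    {
        for (; m; m = m->parent)
            if (atomic_load(&m->zswap_b) >= m->zswap_max)
                return false;
        return true;
    }

    static void my_zswap_charge(struct my_memcg *m, size_t size)
    {
        /* the real path first charges size via obj_cgroup_charge() */
        for (; m; m = m->parent)
            atomic_fetch_add(&m->zswap_b, size);
    }

    static void my_zswap_uncharge(struct my_memcg *m, size_t size)
    {
        /* mirrors obj_cgroup_uncharge() plus the stat decrement */
        for (; m; m = m->parent)
            atomic_fetch_sub(&m->zswap_b, size);
    }

    int main(void)
    {
        struct my_memcg root = { .zswap_max = 8192 };
        struct my_memcg child = { .parent = &root, .zswap_max = 4096 };

        if (my_zswap_may_store(&child))
            my_zswap_charge(&child, 2048);
        my_zswap_uncharge(&child, 2048);
        return 0;
    }
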