Lines Matching defs:prdev (mm/page_reporting.c)

61 __page_reporting_request(struct page_reporting_dev_info *prdev)
66 state = atomic_read(&prdev->state);
74 state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
83 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
86 /* notify prdev of free page reporting request */
89 struct page_reporting_dev_info *prdev;
97 prdev = rcu_dereference(pr_dev_info);
98 if (likely(prdev))
99 __page_reporting_request(prdev);
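
The hits above (source lines 61–99) make up the request path. The sketch below is a reconstruction assembled from those hits, not a verbatim quote; the PAGE_REPORTING_DELAY value is copied from the core's private definition and the comments are editorial.

#include <linux/atomic.h>
#include <linux/page_reporting.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

#define PAGE_REPORTING_DELAY	(2 * HZ)	/* matches the core's value */

static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

static void __page_reporting_request(struct page_reporting_dev_info *prdev)
{
	unsigned int state;

	/* cheap read first: repeat callers see REQUESTED and bail out */
	state = atomic_read(&prdev->state);
	if (state == PAGE_REPORTING_REQUESTED)
		return;

	/* only the caller that moves the state off IDLE arms the worker */
	state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
	if (state != PAGE_REPORTING_IDLE)
		return;

	/* the delay lets a sizable batch of freed pages build up first */
	schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}

/* notify prdev of free page reporting request */
void __page_reporting_notify(void)
{
	struct page_reporting_dev_info *prdev;

	/* pr_dev_info is RCU-protected so unregister can clear it safely */
	rcu_read_lock();
	prdev = rcu_dereference(pr_dev_info);
	if (likely(prdev))
		__page_reporting_request(prdev);
	rcu_read_unlock();
}
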
105 page_reporting_drain(struct page_reporting_dev_info *prdev,
146 page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
194 atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED);
225 err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);
237 page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);
260 page_reporting_process_zone(struct page_reporting_dev_info *prdev,
285 err = page_reporting_cycle(prdev, zone, order, mt,
296 err = prdev->report(prdev, sgl, leftover);
300 page_reporting_drain(prdev, sgl, leftover, !err);
310 struct page_reporting_dev_info *prdev =
322 atomic_set(&prdev->state, state);
332 err = page_reporting_process_zone(prdev, sgl, zone);
344 state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
346 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
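
The middle hits (lines 105–346) are the reporting engine: the delayed work marks the state active (line 322), walks each populated zone, and page_reporting_cycle() batches free pages of a given order and migratetype into a scatterlist, hands up to PAGE_REPORTING_CAPACITY entries to prdev->report() (line 225), then page_reporting_drain() puts the pages back, flagging them as reported only when the callback succeeded (note the !err at lines 237 and 300). The subtle part is the exit check at lines 344–346; below is a sketch of just that step, reusing the definitions above, with the helper name invented for illustration.

/* Hypothetical helper isolating the worker's exit check: drop back to
 * IDLE only if no new request raced in during this pass; if one did,
 * the cmpxchg reads back REQUESTED and the worker rearms itself. */
static void page_reporting_rearm_if_requested(struct page_reporting_dev_info *prdev)
{
	int state = atomic_cmpxchg(&prdev->state, PAGE_REPORTING_ACTIVE,
				   PAGE_REPORTING_IDLE);

	if (state == PAGE_REPORTING_REQUESTED)
		schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}
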
352 int page_reporting_register(struct page_reporting_dev_info *prdev)
373 if (prdev->order > 0 && prdev->order <= MAX_PAGE_ORDER)
374 page_reporting_order = prdev->order;
380 atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
381 INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
384 __page_reporting_request(prdev);
387 rcu_assign_pointer(pr_dev_info, prdev);
401 void page_reporting_unregister(struct page_reporting_dev_info *prdev)
405 if (prdev == rcu_dereference_protected(pr_dev_info,
412 cancel_delayed_work_sync(&prdev->work);
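
The final hits (lines 352–412) are the driver-facing API: page_reporting_register() accepts one device at a time, honors a sane driver-supplied prdev->order (lines 373–374), schedules an initial pass, and publishes the device with rcu_assign_pointer() (line 387); page_reporting_unregister() reverses this and flushes the worker with cancel_delayed_work_sync() (line 412). A minimal, hypothetical consumer is sketched below; every demo_* name is invented, and a real user of this interface is the virtio-balloon driver.

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_reporting.h>
#include <linux/scatterlist.h>

/* Hypothetical report() callback. A real driver would hand the batch
 * to its hypervisor here; returning 0 lets page_reporting_drain() mark
 * the pages reported, nonzero puts them back unmarked for a later pass. */
static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_debug("reporting pfn %lx, len %u\n",
			 page_to_pfn(sg_page(sg)), sg->length);

	return 0;
}

static struct page_reporting_dev_info demo_prdev = {
	.report = demo_report,
	.order	= 0,	/* 0: let the core pick its default granularity */
};

static int __init demo_init(void)
{
	/* fails with -EBUSY if another device is already registered */
	return page_reporting_register(&demo_prdev);
}

static void __exit demo_exit(void)
{
	page_reporting_unregister(&demo_prdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("free page reporting consumer sketch");
MODULE_LICENSE("GPL");

Because pr_dev_info is a single RCU-protected pointer (lines 97 and 405), only one reporting device can be active system-wide, which is why registration fails when a device is already present.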