// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
	const char			*name;
	size_t				size;
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct devres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * 	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			dr_match_t match, void *match_data,
			void (*fn)(struct device *, void *, void *),
			void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
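
/*
 * Example (illustrative sketch, not part of the original file): the
 * classic devres pattern of allocate -> initialize -> add.  The foo_ctx
 * structure, foo_release() and the ioremap() details are hypothetical
 * stand-ins for a real driver's resource.
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_ctx *ctx = res;
 *
 *		iounmap(ctx->regs);
 *	}
 *
 *	static int foo_setup(struct device *dev, resource_size_t base)
 *	{
 *		struct foo_ctx *ctx;
 *
 *		ctx = devres_alloc(foo_release, sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *
 *		ctx->regs = ioremap(base, SZ_4K);
 *		if (!ctx->regs) {
 *			devres_free(ctx);	// not yet added: free by hand
 *			return -ENOMEM;
 *		}
 *
 *		devres_add(dev, ctx);		// foo_release() runs on detach
 *		return 0;
 *	}
 */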

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
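
/*
 * Example (hedged sketch): devres_get() implements find-or-create for a
 * per-device singleton.  A hypothetical foo_stats record is created once
 * and shared by later callers; the loser's pre-allocated copy is freed
 * automatically.  foo_stats and foo_stats_release are made up.
 *
 *	struct foo_stats *stats, *new;
 *
 *	new = devres_alloc(foo_stats_release, sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *
 *	stats = devres_get(dev, new, NULL, NULL);	// match-all lookup
 *	// 'stats' is either the existing record or 'new'; 'new' must not
 *	// be touched afterwards since devres_get() may have freed it.
 */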

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
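
/*
 * Example (illustrative): tearing one managed resource down early instead
 * of waiting for detach.  The foo_release/foo_match pair and 'key' mirror
 * whatever was used when the resource was added; devres_release() runs
 * the release function first, while devres_destroy() would only drop the
 * bookkeeping and leave the cleanup to the caller.
 *
 *	WARN_ON(devres_release(dev, foo_release, foo_match, &key));
 */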

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end). That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, or -ENODEV if the
 * devres list of @dev was never initialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
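
/*
 * Example (sketch): groups give partial rollback of managed resources.
 * Everything registered between open and close belongs to the group and
 * can be released in one call, e.g. when an optional sub-initialization
 * fails.  foo_init_optional_part() is a hypothetical helper that uses
 * devm_* interfaces internally.
 *
 *	void *grp = devres_open_group(dev, NULL, GFP_KERNEL);
 *	if (!grp)
 *		return -ENOMEM;
 *
 *	ret = foo_init_optional_part(dev);
 *	if (ret) {
 *		devres_release_group(dev, grp);	// undo just this part
 *		return ret;
 *	}
 *	devres_close_group(dev, grp);
 */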

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * __devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 * @name: Name of the resource (for debugging purposes)
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
	struct action_devres *devres;

	devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
				     GFP_KERNEL, NUMA_NO_NODE, name);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
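
/*
 * Example (illustrative): drivers normally reach this through the
 * devm_add_action() / devm_add_action_or_reset() wrappers from
 * <linux/device.h>, which supply the function name as @name.  The
 * foo_disable() callback and its foo_hw argument are hypothetical.
 *
 *	static void foo_disable(void *data)
 *	{
 *		struct foo_hw *hw = data;
 *
 *		foo_hw_power_off(hw);
 *	}
 *
 *	// runs foo_disable(hw) immediately if registration fails:
 *	ret = devm_add_action_or_reset(dev, foo_disable, hw);
 *	if (ret)
 *		return ret;
 */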

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
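
/*
 * Example (sketch): the common probe() allocation pattern.  devm_kzalloc()
 * is devm_kmalloc() with __GFP_ZERO; the private structure is freed
 * automatically when the driver detaches, so probe() needs no kfree()
 * error path for it.  foo_priv is a hypothetical driver state structure.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 */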

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk. We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list. This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
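
/*
 * Example (sketch): growing a managed buffer.  As with plain krealloc(),
 * the old pointer must not be used once a larger chunk was handed back;
 * on failure the original buffer stays valid and stays managed.
 *
 *	u32 *buf = devm_kmalloc(dev, 8 * sizeof(*buf), GFP_KERNEL);
 *	...
 *	u32 *bigger = devm_krealloc(dev, buf, 32 * sizeof(*buf), GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;	// 'buf' is still usable here
 *	buf = bigger;
 */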

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
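
/*
 * Example (illustrative): string literals live in .rodata, so they are
 * returned as-is with no allocation at all; runtime-built strings are
 * duplicated and managed.  'tmp_buf' is a hypothetical stack buffer.
 *
 *	name = devm_kstrdup_const(dev, "foo-default", GFP_KERNEL); // no alloc
 *	name = devm_kstrdup_const(dev, tmp_buf, GFP_KERNEL);       // managed copy
 */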

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
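
/*
 * Example (sketch): formatting a managed name string, a common step when
 * registering sub-devices.  The 'idx' variable is hypothetical.
 *
 *	const char *name = devm_kasprintf(dev, GFP_KERNEL, "foo%d", idx);
 *	if (!name)
 *		return -ENOMEM;
 */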

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
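
/*
 * Example (illustrative): a managed page-order allocation, e.g. for a
 * buffer that must be physically contiguous.  Two pages (order 1) are
 * requested here; the order and usage are made up for the sketch.
 *
 *	unsigned long ring = devm_get_free_pages(dev, GFP_KERNEL, 1);
 *	if (!ring)
 *		return -ENOMEM;
 *	// freed automatically, or early via devm_free_pages(dev, ring)
 */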

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
		size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
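
/*
 * Example (sketch): drivers use the devm_alloc_percpu() macro from
 * <linux/device.h>, which derives @size and @align from the type.  The
 * per-CPU counter shown is hypothetical.
 *
 *	u64 __percpu *hits = devm_alloc_percpu(dev, u64);
 *	if (!hits)
 *		return -ENOMEM;
 *	this_cpu_inc(*hits);
 */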

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);