/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <linux/types.h>

#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using
 * a number of search trees. These trees are organised by size, by address and
 * in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,

	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),

	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};

/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can
 * be embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT	0
#define DRM_MM_NODE_SCANNED_BIT		1
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
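
/*
 * Illustrative sketch (assumption, not part of this header): since the node
 * is allocated by the driver, it is typically embedded in a larger,
 * driver-private object and zeroed before being handed to the allocator.
 * The structure and field names below are made up for the example:
 *
 *	struct my_buffer {
 *		struct drm_mm_node vram_node;
 *		void *vaddr;
 *	};
 */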

/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole. The
	 * node argument points at the node containing the hole from which the
	 * block would be allocated (see drm_mm_hole_follows() and friends). The
	 * other arguments are the coloring tag of the block to be allocated and
	 * pointers to the start and end of the hole, which the driver can
	 * adjust as needed to e.g. insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
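
/*
 * Illustrative sketch (assumption): a minimal @color_adjust implementation
 * that keeps a guard page in front of a block whose tag differs from that of
 * the node preceding the hole. The function name is hypothetical, and a full
 * implementation would usually also shrink *end against the node following
 * the hole.
 *
 *	static void my_guard_color_adjust(const struct drm_mm_node *node,
 *					  unsigned long color,
 *					  u64 *start, u64 *end)
 *	{
 *		if (node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 */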

/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return READ_ONCE(mm->hole_stack.next);
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
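
/*
 * Illustrative sketch (assumption): tearing down an allocator by removing
 * every remaining node before drm_mm_takedown(). The _safe variant is needed
 * because nodes are unlinked while the list is walked.
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, &mm)
 *		drm_mm_remove_node(node);
 *	drm_mm_takedown(&mm);
 */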

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so not safe against removal of elements. @pos is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))
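
/*
 * Illustrative sketch (assumption): a driver-specific debug dump of all
 * holes; the printing is made up for the example.
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, &mm, hole_start, hole_end)
 *		pr_info("hole: [%llx, %llx)\n", hole_start, hole_end);
 */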

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);
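
/*
 * Illustrative sketch (assumption): placing a 4 KiB, 4 KiB-aligned block
 * somewhere in the low 4 GiB of the managed range, preferring the lowest
 * suitable hole. SZ_4K/SZ_4G come from <linux/sizes.h>; -ENOSPC is returned
 * when no hole in the range fits.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, SZ_4K, SZ_4K, 0,
 *					  0, SZ_4G, DRM_MM_INSERT_LOW);
 *	if (err)
 *		return err;
 */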

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}
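
/*
 * Illustrative sketch (assumption): the minimal insertion pattern. The node
 * must start out zeroed; on success the assigned range can be read back from
 * node.start and node.size.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node(&mm, &node, size);
 *	if (err)
 *		return err;
 */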

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
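
/*
 * Illustrative sketch (assumption): typical lifecycle of an embedded
 * allocator managing a hypothetical 256 MiB aperture. All nodes must have
 * been removed again before drm_mm_takedown() is called.
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, SZ_256M);
 *	... insert and later remove nodes ...
 *	drm_mm_takedown(&mm);
 */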

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
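
/*
 * Illustrative sketch (assumption): logging every allocated node that
 * overlaps a candidate range before trying to reserve it.
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, &mm, start, end)
 *		pr_debug("busy: [%llx + %llx]\n", node->start, node->size);
 */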

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
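
/*
 * Illustrative sketch (assumption): the usual eviction roster flow, loosely
 * modelled on how drivers such as i915 use it. Scanned objects are collected
 * on a temporary list so they can be taken back out of the scan in the
 * required order; the object type, LRU list and my_evict() helper (which is
 * assumed to end in drm_mm_remove_node()) are hypothetical.
 *
 *	struct drm_mm_scan scan;
 *	struct drm_mm_node new_node = {};
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(eviction_list);
 *	bool found = false;
 *	int err = -ENOSPC;
 *
 *	drm_mm_scan_init(&scan, &mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			my_evict(obj);
 *	}
 *	if (found)
 *		err = drm_mm_insert_node_generic(&mm, &new_node, size, 0, 0,
 *						 DRM_MM_INSERT_EVICT);
 */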

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif