// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

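/**
 * struct vmwgfx_gmrid_man - GMR/MOB id manager built on a TTM resource manager.
 * @manager: The embedded TTM resource manager.
 * @lock: Protects @used_gmr_pages and @max_gmr_pages.
 * @gmr_ida: Ida handing out the GMR or MOB ids.
 * @max_gmr_ids: Maximum number of ids that may be allocated.
 * @max_gmr_pages: Soft limit on the number of pages backed by this manager.
 * @used_gmr_pages: Number of pages currently charged against the limit.
 * @type: VMW_PL_GMR or VMW_PL_MOB.
 */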
struct vmwgfx_gmrid_man {
	struct ttm_resource_manager manager;
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
	uint8_t type;
};

static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmwgfx_gmrid_man, manager);
}

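/**
 * vmw_gmrid_man_get_node - Allocate a GMR/MOB id and account for its pages.
 * @man: The resource manager.
 * @bo: The buffer object the resource is allocated for.
 * @place: The requested placement.
 * @res: On successful return, points to the newly allocated resource.
 *
 * Allocates an id from the ida and charges the resource's pages against the
 * soft graphics memory limit, growing the limit when possible.
 *
 * Return: 0 on success, a negative error code on failure.
 */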
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
	int id;

	*res = kmalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;

	ttm_resource_init(bo, place, *res);

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0) {
		ttm_resource_fini(man, *res);
		kfree(*res);
		return id;
	}

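	/*
	 * The page accounting below is done under the manager lock since
	 * used_gmr_pages and max_gmr_pages are shared between allocations.
	 */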
	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += PFN_UP((*res)->size);
		/*
		 * Because the graphics memory is a soft limit we can try to
		 * expand it instead of letting the userspace apps crash.
		 * We're just going to have a sane limit (half of RAM)
		 * on the number of MOB's that we create and will try to keep
		 * the system running until we reach that.
		 */
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) {
			const unsigned long max_graphics_pages = totalram_pages() / 2;
			uint32_t new_max_pages = 0;

			DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
			vmw_host_printf("vmwgfx, warning: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");

			if (gman->max_gmr_pages > (max_graphics_pages / 2)) {
				DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n");
				new_max_pages = max_graphics_pages;
			} else {
				new_max_pages = gman->max_gmr_pages * 2;
			}

			if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
				DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
					 ((new_max_pages) << (PAGE_SHIFT - 10)));

				gman->max_gmr_pages = new_max_pages;
			} else {
				char buf[256];

				snprintf(buf, sizeof(buf),
					 "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
					 ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
				vmw_host_printf(buf);
				DRM_WARN("%s", buf);
				goto nospace;
			}
		}
	}

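	/* Hand the allocated id back to TTM as the resource start offset. */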
	(*res)->start = id;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= PFN_UP((*res)->size);
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	ttm_resource_fini(man, *res);
	kfree(*res);
	return -ENOSPC;
}

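/**
 * vmw_gmrid_man_put_node - Release a GMR/MOB id and its page accounting.
 * @man: The resource manager.
 * @res: The resource to free.
 */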
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ida_free(&gman->gmr_ida, res->start);
	spin_lock(&gman->lock);
	gman->used_gmr_pages -= PFN_UP(res->size);
	spin_unlock(&gman->lock);
	ttm_resource_fini(man, res);
	kfree(res);
}

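/**
 * vmw_gmrid_man_debug - Dump manager usage to a drm printer.
 * @man: The resource manager.
 * @printer: The drm printer to print with.
 */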
static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
				struct drm_printer *printer)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);

	drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
		   (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
		   gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func;

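/**
 * vmw_gmrid_man_init - Create and register a GMR or MOB id manager.
 * @dev_priv: Pointer to the device private structure.
 * @type: VMW_PL_GMR or VMW_PL_MOB.
 *
 * A caller on the driver init path would typically do something like the
 * following (a sketch only; error handling elided):
 *
 *	ret = vmw_gmrid_man_init(dev_priv, VMW_PL_GMR);
 *	if (ret)
 *		return ret;
 *
 * Return: 0 on success, -ENOMEM if the manager could not be allocated.
 */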
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man;
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	man = &gman->manager;

	man->func = &vmw_gmrid_manager_func;
	man->use_tt = true;
	ttm_resource_manager_init(man, &dev_priv->bdev, 0);
	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);
	gman->type = type;

	switch (type) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

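/**
 * vmw_gmrid_man_fini - Evict remaining resources and tear down the manager.
 * @dev_priv: Pointer to the device private structure.
 * @type: VMW_PL_GMR or VMW_PL_MOB.
 */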
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ttm_resource_manager_set_used(man, false);

	ttm_resource_manager_evict_all(&dev_priv->bdev, man);

	ttm_resource_manager_cleanup(man);

	ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
	ida_destroy(&gman->gmr_ida);
	kfree(gman);
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,
	.free = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};