/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists.  If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring
 * and wait for any one of those fences to complete.
 */
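/*
 * Illustration (editor's sketch, not part of the driver; offsets are
 * made up): with three live allocations A, B and C in a 0x4000-byte
 * manager, the ordered list and the hole look like this:
 *
 *	0x0000      0x1000      0x2000      0x3000      0x4000
 *	  |----A----|-----B-----|-----C-----|...free....|
 *	                                    ^
 *	                                hole (after C)
 *
 * The next allocation is tried in [C->eoffset, size).  Once that space
 * runs out, the hole wraps to the list head and allocation resumes at
 * offset 0, reclaiming A, B, ... as their fences signal.
 */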
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	sx_init(&sa_manager->wq_lock, "drm__radeon_sa_manager_wq_mtx");
	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
	cv_destroy(&sa_manager->wq);
	sx_destroy(&sa_manager->wq_lock);
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}
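/*
 * Lifecycle sketch (editor's illustration, not driver code): a consumer
 * such as an IB or semaphore pool would typically drive the manager as
 * below.  The 64 KB size and GTT domain are made-up example values.
 *
 *	struct radeon_sa_manager mgr;
 *
 *	r = radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024,
 *				      RADEON_GEM_DOMAIN_GTT);
 *	r = radeon_sa_bo_manager_start(rdev, &mgr);	-- pin + kmap
 *	... allocate and free sub-buffers ...
 *	r = radeon_sa_bo_manager_suspend(rdev, &mgr);	-- kunmap + unpin
 *	radeon_sa_bo_manager_fini(rdev, &mgr);
 */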
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	free(sa_bo, DRM_MEM_DRIVER);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest after the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
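/*
 * Worked example (editor's note, made-up numbers) for the two
 * computations above:
 *
 * Alignment in radeon_sa_bo_try_alloc(): with soffset = 0x0C40 and
 * align = 0x100, wasted = (0x100 - 0x0C40 % 0x100) % 0x100 = 0xC0,
 * so the allocation really starts at 0x0D00; an already aligned
 * soffset yields wasted = 0.
 *
 * Wrap-around in radeon_sa_bo_next_hole(): in a 0x4000-byte manager
 * with the hole at soffset = 0x3000, a candidate at offset 0x0800
 * lies "before" the hole in buffer order, so its ring-order distance
 * is 0x0800 + 0x4000 - 0x3000 = 0x1800, while a candidate at 0x3800
 * is only 0x0800 away and is therefore the better choice.
 */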
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	KASSERT(align <= RADEON_GPU_PAGE_SIZE, ("align > RADEON_GPU_PAGE_SIZE"));
	KASSERT(size <= sa_manager->size, ("size > sa_manager->size"));

	*sa_bo = malloc(sizeof(struct radeon_sa_bo), DRM_MEM_DRIVER, M_WAITOK);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	sx_xlock(&sa_manager->wq_lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				sx_xunlock(&sa_manager->wq_lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		sx_xunlock(&sa_manager->wq_lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		sx_xlock(&sa_manager->wq_lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT && block) {
			while (!radeon_sa_event(sa_manager, size, align)) {
				r = -cv_wait_sig(&sa_manager->wq,
						 &sa_manager->wq_lock);
				if (r == -EINTR)
					r = -ERESTARTSYS;
				if (r != 0)
					break;
			}

		} else if (r == -ENOENT) {
			r = -ENOMEM;
		}

	} while (!r);

	sx_xunlock(&sa_manager->wq_lock);
	free(*sa_bo, DRM_MEM_DRIVER);
	*sa_bo = NULL;
	return r;
}

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	sx_xlock(&sa_manager->wq_lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	cv_broadcast(&sa_manager->wq);
	sx_xunlock(&sa_manager->wq_lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif
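/*
 * Usage sketch (editor's illustration, not driver code): how a caller
 * would typically pair radeon_sa_bo_new() and radeon_sa_bo_free().
 * The 512-byte size and 256-byte alignment are made-up example values.
 *
 *	struct radeon_sa_bo *sa_bo;
 *
 *	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 512, 256, true);
 *	if (r)
 *		return r;
 *	... fill the sub-allocation through sa_manager->cpu_ptr,
 *	    submit work, obtain a fence ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 *
 * Passing a not-yet-signaled fence defers the actual reclaim until the
 * GPU is done with the range; passing NULL frees it immediately.
 */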