radeon_sa.c revision 1.2
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * right after it.  The principle is that in a linear GPU ring
 * progression, what comes after the last allocation is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * completed bo, if one exists.  If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * return as soon as any of those fences completes.
 */
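/* For example: with suballocations A, B and C made in that order, "hole"
 * sits right after C.  The next allocation first tries the space between
 * C and the end of the buffer (or, after wrapping, between the start of
 * the buffer and A).  If that space is too small, A is the oldest
 * allocation, so its fence is the first one expected to signal and A is
 * the first candidate to be freed or waited on.
 */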
#include <drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

#ifdef __NetBSD__
	spin_lock_init(&sa_manager->wq_lock);
	DRM_INIT_WAITQUEUE(&sa_manager->wq, "radsabom");
#else
	init_waitqueue_head(&sa_manager->wq);
#endif
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist,
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&sa_manager->wq);
	spin_lock_destroy(&sa_manager->wq_lock);
#endif
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}
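/**
 * radeon_sa_bo_remove_locked - drop a suballocation from the manager
 *
 * @sa_bo: suballocation to remove
 *
 * Unlink @sa_bo from the offset and fence lists, move the hole pointer
 * back if it pointed at @sa_bo, drop the fence reference and free the
 * structure.  Caller must hold the manager lock.
 */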
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
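/**
 * radeon_sa_bo_next_hole - advance the hole past old allocations
 *
 * @sa_manager: pointer to the sa_manager
 * @fences: array collecting the oldest unsignaled fence of each ring
 * @tries: per-ring count limiting how often that ring may be skipped to
 *
 * Wrap the hole back to the start of the buffer when it sits at the end,
 * or remove the closest already signaled suballocation and move the hole
 * there.  Returns true if the caller should retry the allocation, false
 * if there is nothing left to skip over.
 */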
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
#ifdef __NetBSD__
				spin_unlock(&sa_manager->wq_lock);
#else
				spin_unlock(&sa_manager->wq.lock);
#endif
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

#ifdef __NetBSD__
		spin_unlock(&sa_manager->wq_lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq_lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT)
			DRM_SPIN_WAIT_UNTIL(r, &sa_manager->wq,
			    &sa_manager->wq_lock,
			    radeon_sa_event(sa_manager, size, align));
#else
		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				radeon_sa_event(sa_manager, size, align)
			);
		}
#endif

	} while (!r);

#ifdef __NetBSD__
	spin_unlock(&sa_manager->wq_lock);
#else
	spin_unlock(&sa_manager->wq.lock);
#endif
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&sa_manager->wq, &sa_manager->wq_lock);
	spin_unlock(&sa_manager->wq_lock);
#else
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
#endif
	*sa_bo = NULL;
}
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif
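For context, here is a minimal sketch of how driver code might drive this sub-allocator once radeon_sa_bo_manager_init() and radeon_sa_bo_manager_start() have been called on the manager. It assumes a file that already includes "radeon.h"; the function name, sizes and alignment are illustrative and not taken from this file, and the radeon_sa_bo_cpu_addr()/radeon_sa_bo_gpu_addr() helpers are the accessors declared in radeon.h.

#include "radeon.h"

/* Illustrative only: carve a 4 KiB, 256-byte aligned piece out of an
 * already started sa_manager, fill it, then hand it back protected by
 * @fence so it is not reused before that fence signals. */
static int example_sa_roundtrip(struct radeon_device *rdev,
				struct radeon_sa_manager *sa_manager,
				struct radeon_fence *fence)
{
	struct radeon_sa_bo *sa_bo;
	int r;

	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 4096, 256);
	if (r)
		return r;

	/* CPU address of the piece; the GPU side would be pointed at
	 * radeon_sa_bo_gpu_addr(sa_bo) by a ring packet */
	memset(radeon_sa_bo_cpu_addr(sa_bo), 0, 4096);

	radeon_sa_bo_free(rdev, &sa_bo, fence);
	return 0;
}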