/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/sort.h>
#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Userspace-supplied per-BO priorities are clamped to this maximum before
 * being stored in the list entry.
 */
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)

/*
 * RCU callback that performs the final teardown of a BO list: destroys the
 * embedded mutex and releases the (kvzalloc'ed) storage.  Runs only after a
 * grace period, so lock-free readers in amdgpu_bo_list_get() cannot still be
 * dereferencing the object.
 */
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);
	mutex_destroy(&list->bo_list_mutex);
	kvfree(list);
}

/*
 * kref release function for a BO list.  Drops the reference each entry holds
 * on its BO, then defers freeing of the list itself through call_rcu() so
 * that concurrent RCU lookups (see amdgpu_bo_list_get()) remain safe.
 */
static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list)
		amdgpu_bo_unref(&e->bo);
	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}

/*
 * sort() comparator: orders entries by ascending priority.  Entries with
 * equal priority compare equal, so their relative order is left to sort().
 */
static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b)
{
	const struct amdgpu_bo_list_entry *a = _a, *b = _b;

	if (a->priority > b->priority)
		return 1;
	if (a->priority < b->priority)
		return -1;
	return 0;
}

/*
 * amdgpu_bo_list_create - build a new BO list from validated entry info
 * @adev: amdgpu device (unused here beyond the conventional signature)
 * @filp: DRM file the GEM handles in @info belong to
 * @info: array of handle/priority pairs, @num_entries long
 * @num_entries: number of elements in @info
 * @result: on success, set to the newly created list (refcount 1)
 *
 * Entries are split into two regions of the same array: regular BOs are
 * filled from index 0 upward, userptr BOs from the end downward (the
 * boundary is recorded in list->first_userptr).  Userptr BOs owned by a
 * different mm than the caller's are rejected with -EPERM.  Only the
 * non-userptr region is sorted by priority.  The last BO seen whose
 * preferred domain is exactly GDS/GWS/OA is cached in the corresponding
 * list pointer.
 *
 * Returns 0 on success or a negative error code; on failure all BO
 * references taken so far (both regions) are dropped and the list is freed.
 */
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  size_t num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	unsigned i;
	int r;

	/* Entries are allocated inline after the list header. */
	list = kvzalloc(struct_size(list, entries, num_entries), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);

	list->num_entries = num_entries;
	array = list->entries;

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		/* Hold our own BO reference; the GEM lookup ref is dropped. */
		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			/* Userptr BOs may only be used by the owning mm. */
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			/* Userptr BOs fill the array from the back. */
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->bo = bo;

		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = bo;

		total_size += amdgpu_bo_size(bo);
		trace_amdgpu_bo_list_set(list, bo);
	}

	list->first_userptr = first_userptr;
	/* Sort only the non-userptr region ([0, last_entry)) by priority. */
	sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
	     amdgpu_bo_list_entry_cmp, NULL);

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	mutex_init(&list->bo_list_mutex);
	*result = list;
	return 0;

error_free:
	/* Unwind both regions: [0, last_entry) and [first_userptr, num). */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].bo);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].bo);
	kvfree(list);
	return r;

}

/*
 * Remove the list with handle @id from the per-file idr and drop the idr's
 * reference.  The list is only actually freed once all other holders
 * (e.g. in-flight CS) release theirs.  A nonexistent @id is a no-op.
 */
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}

/*
 * amdgpu_bo_list_get - look up a BO list by handle and take a reference
 * @fpriv: per-file private data holding the idr
 * @id: list handle
 * @result: set to the list on success, NULL on failure
 *
 * Lock-free lookup under RCU; kref_get_unless_zero() guards against racing
 * with a concurrent final kref_put (freeing is RCU-deferred, see
 * amdgpu_bo_list_free()).  Returns 0 on success, -ENOENT if the handle does
 * not exist or the list is already being destroyed.  The caller must drop
 * the reference with amdgpu_bo_list_put().
 */
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	*result = NULL;
	return -ENOENT;
}

/* Drop a reference obtained from amdgpu_bo_list_get() or _create(). */
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}

/*
 * amdgpu_bo_create_list_entry_array - copy the userspace BO info array
 * @in: ioctl input describing the user pointer, element count and the
 *      per-element size userspace was compiled against
 * @info_param: on success, set to a kvmalloc'ed kernel copy (caller frees
 *              with kvfree())
 *
 * Fast path: if userspace's element size matches the kernel's, the whole
 * array is copied in one go.  Otherwise each element is copied individually,
 * taking min(user size, kernel size) bytes and zero-filling the rest —
 * this keeps older/newer userspace struct layouts compatible.
 *
 * Returns 0 on success, -ENOMEM or -EFAULT on failure.
 */
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	/* kvmalloc_array() checks bo_number * info_size for overflow. */
	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		/* Zero-fill so fields absent from the user layout read as 0. */
		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}

/*
 * amdgpu_bo_list_ioctl - DRM_AMDGPU_BO_LIST ioctl entry point
 *
 * Dispatches on args->in.operation:
 *   CREATE  - build a list from the user-supplied entries and publish it in
 *             the per-file idr; returns the new handle in out.list_handle.
 *   DESTROY - remove the given handle (returns handle 0).
 *   UPDATE  - build a replacement list and atomically swap it in via
 *             idr_replace(); the old list's reference is dropped.
 *             idr_replace() returns an ERR_PTR for an invalid handle, in
 *             which case the new list is released again.
 *
 * Returns 0 on success or a negative error code.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		/* Handles start at 1; 0 means "no list". */
		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0) {
			goto error_put_list;
		}

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}