/**
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

static int drm_notifier(void *priv);

/**
 * Lock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or a negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int drm_lock(struct inode *inode, struct file *filp,
	     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	DECLARE_WAITQUEUE(entry, current);
	drm_lock_t lock;
	int ret = 0;

	++priv->lock_count;

	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid,
		  dev->lock.hw_lock->lock, lock.flags);

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
		if (lock.context < 0)
			return -EINVAL;

	add_wait_queue(&dev->lock.lock_queue, &entry);
	spin_lock(&dev->lock.spinlock);
	dev->lock.user_waiters++;
	spin_unlock(&dev->lock.spinlock);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock, lock.context)) {
			dev->lock.filp = filp;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
			break;	/* Got lock */
		}

		/* Contention */
		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	spin_lock(&dev->lock.spinlock);
	dev->lock.user_waiters--;
	spin_unlock(&dev->lock.spinlock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->lock.lock_queue, &entry);

	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
	if (ret)
		return ret;

	sigemptyset(&dev->sigmask);
	sigaddset(&dev->sigmask, SIGSTOP);
	sigaddset(&dev->sigmask, SIGTSTP);
	sigaddset(&dev->sigmask, SIGTTIN);
	sigaddset(&dev->sigmask, SIGTTOU);
	dev->sigdata.context = lock.context;
	dev->sigdata.lock = dev->lock.hw_lock;
	block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

	if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
		dev->driver->dma_ready(dev);

	if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
		if (dev->driver->dma_quiescent(dev)) {
			DRM_DEBUG("%d waiting for DMA quiescent\n",
				  lock.context);
			return DRM_ERR(EBUSY);
		}
	}

	if (dev->driver->kernel_context_switch &&
	    dev->last_context != lock.context) {
		dev->driver->kernel_context_switch(dev, dev->last_context,
						   lock.context);
	}

	return 0;
}
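/*
 * Illustrative sketch only (not part of this module): roughly how a
 * userspace DRI client reaches drm_lock()/drm_unlock() above.  The fd,
 * ctx and touch_hardware() names are placeholders; real clients normally
 * use the libdrm drmGetLock()/drmUnlock() wrappers rather than raw ioctls.
 *
 *	drm_lock_t lock_arg;
 *
 *	lock_arg.context = ctx;
 *	lock_arg.flags = _DRM_LOCK_READY;
 *	while (ioctl(fd, DRM_IOCTL_LOCK, &lock_arg) != 0 && errno == EINTR)
 *		;	/* interrupted while sleeping on the lock queue */
 *
 *	touch_hardware();
 *
 *	ioctl(fd, DRM_IOCTL_UNLOCK, &lock_arg);
 */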
/**
 * Unlock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or a negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_unlock(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_lock_t lock;
	unsigned long irqflags;

	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	/* Run and clear any tasklet function that was deferred because it
	 * needed the hardware lock. */
	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	if (dev->locked_tasklet_func) {
		dev->locked_tasklet_func(dev);

		dev->locked_tasklet_func = NULL;
	}

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	/* kernel_context_switch isn't used by any of the x86 drm
	 * modules but is required by the Sparc driver.
	 */
	if (dev->driver->kernel_context_switch_unlock)
		dev->driver->kernel_context_switch_unlock(dev);
	else
		drm_lock_free(&dev->lock, lock.context);

	unblock_all_signals();
	return 0;
}

/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
int drm_lock_take(drm_lock_data_t *lock_data,
		  unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock(&lock_data->spinlock);
	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else {
			new = context | _DRM_LOCK_HELD |
				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
				 _DRM_LOCK_CONT : 0);
		}
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	spin_unlock(&lock_data->spinlock);

	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
					  context);
			}
			return 0;
		}
	}

	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

/**
 * This takes a lock forcibly and hands it to context.  Should ONLY be used
 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(drm_lock_data_t *lock_data,
			     unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	lock_data->filp = NULL;
	do {
		old = *lock;
		new = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	return 1;
}
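/*
 * Note on the lock encoding used above and below: the hardware lock is a
 * single 32-bit word shared with userspace.  _DRM_LOCK_HELD and
 * _DRM_LOCK_CONT occupy the two top bits (see drm.h), and the remaining
 * bits hold the owning context, which _DRM_LOCKING_CONTEXT() extracts by
 * masking those flags off.  For example, context 3 holding the lock
 * uncontended reads as (3 | _DRM_LOCK_HELD) == 0x80000003.
 */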
/**
 * Free lock.
 *
 * \param lock_data lock data.
 * \param context context.
 *
 * Resets the lock file pointer.
 * Marks the lock as not held, via the \p cmpxchg instruction.  Wakes any task
 * waiting on the lock queue.
 */
int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock(&lock_data->spinlock);
	if (lock_data->kernel_waiters != 0) {
		/* A kernel waiter (idlelock) is queued: hand the lock to the
		 * kernel context instead of dropping it. */
		drm_lock_transfer(lock_data, 0);
		lock_data->idle_has_lock = 1;
		spin_unlock(&lock_data->spinlock);
		return 1;
	}
	spin_unlock(&lock_data->spinlock);

	do {
		old = *lock;
		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&lock_data->lock_queue);
	return 0;
}

/**
 * If we get here, it means that the process has called DRM_IOCTL_LOCK
 * without calling DRM_IOCTL_UNLOCK.
 *
 * If the lock is not held, then let the signal proceed as usual.  If the lock
 * is held, then set the contended flag and keep the signal blocked.
 *
 * \param priv pointer to a drm_sigdata structure.
 * \return one if the signal should be delivered normally, or zero if the
 * signal should be blocked.
 */
static int drm_notifier(void *priv)
{
	drm_sigdata_t *s = (drm_sigdata_t *) priv;
	unsigned int old, new, prev;

	/* Allow signal delivery if lock isn't held */
	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
		return 1;

	/* Otherwise, set flag to force call to drmUnlock */
	do {
		old = s->lock->lock;
		new = old | _DRM_LOCK_CONT;
		prev = cmpxchg(&s->lock->lock, old, new);
	} while (prev != old);
	return 0;
}
/**
 * This function returns immediately and takes the hw lock with the kernel
 * context if it is free, otherwise the kernel context gets highest priority
 * when and if the lock is eventually released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_
 * it is held by a blocked process.  (In the latter case an explicit wait for
 * the hardware lock would cause a deadlock, which is why the "idlelock" was
 * invented.)
 *
 * This should be sufficient to wait for GPU idle without having to worry
 * about starvation.
 */

void drm_idlelock_take(drm_lock_data_t *lock_data)
{
	int ret = 0;

	spin_lock(&lock_data->spinlock);
	lock_data->kernel_waiters++;
	if (!lock_data->idle_has_lock) {

		spin_unlock(&lock_data->spinlock);
		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
		spin_lock(&lock_data->spinlock);

		if (ret == 1)
			lock_data->idle_has_lock = 1;
	}
	spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);

void drm_idlelock_release(drm_lock_data_t *lock_data)
{
	unsigned int old, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock(&lock_data->spinlock);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			do {
				old = *lock;
				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
			} while (prev != old);
			wake_up_interruptible(&lock_data->lock_queue);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);

int drm_i_have_hw_lock(struct file *filp)
{
	DRM_DEVICE;

	return (priv->lock_count && dev->lock.hw_lock &&
		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
		dev->lock.filp == filp);
}

EXPORT_SYMBOL(drm_i_have_hw_lock);
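/*
 * Illustrative sketch only: how a driver might bracket a wait for GPU idle
 * with the idlelock exported above.  foo_wait_idle() and foo_wait_ring_idle()
 * are hypothetical driver functions; the only real interfaces used are
 * drm_idlelock_take() and drm_idlelock_release().
 *
 *	static int foo_wait_idle(drm_device_t *dev)
 *	{
 *		int ret;
 *
 *		drm_idlelock_take(&dev->lock);
 *		ret = foo_wait_ring_idle(dev->dev_private);
 *		drm_idlelock_release(&dev->lock);
 *
 *		return ret;
 *	}
 */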