Searched refs:lock (Results 151 - 175 of 6515) sorted by last modified time


/linux-master/drivers/tty/serial/
serial_core.c
38 * This is used to lock changes in serial line configuration.
43 * lockdep: port->lock is initialized in two places, but we
44 * want only one lock-class:
110 * Locking: @port->lock should be held
282 * Do not free() the page under the port lock, see
397 * Do not free() the transmit buffer page under the port lock since
420 * Locking: caller is expected to take @port->lock
1763 spin_lock_irq(&port->lock);
1765 spin_unlock_irq(&port->lock);
1899 spin_lock_irqsave(&port->lock, flags);
[all...]
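The lockdep note above (serial_core.c lines 43-44) concerns a lock that is initialized in two places but must map to a single lock-class. The usual fix is a shared struct lock_class_key applied after spin_lock_init(); a minimal sketch, assuming a hypothetical example_port_lock_init() helper that both init paths call:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One class key shared by every init path, so lockdep sees a single
 * lock-class no matter where the lock was initialized. */
static struct lock_class_key example_port_lock_key;

static void example_port_lock_init(spinlock_t *lock)
{
        spin_lock_init(lock);
        lockdep_set_class(lock, &example_port_lock_key);
}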
pmac_zilog.c
103 * port lock must be held and local interrupts disabled.
182 * The UART port lock must be held and local interrupts disabled.
209 __must_hold(&uap->port.lock)
479 * Peek the status register, lock not held by caller
495 * The port lock is not held.
509 * The port lock is held and interrupts are disabled.
551 * The port lock is held and interrupts are disabled.
575 * The port lock is held and interrupts are disabled.
584 * The port lock is held and interrupts are disabled.
626 * The port lock is held and interrupts are disabled.
[all...]
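Most pmac_zilog hits are comments stating a locking precondition ("the port lock is held and interrupts are disabled"); the __must_hold() hit at line 209 encodes the same contract as a sparse annotation. A sketch of the pattern, with an illustrative function name:

#include <linux/spinlock.h>
#include <linux/serial_core.h>

/* Sparse can now warn if a caller enters or leaves this function
 * without holding port->lock. */
static void example_tx_chars(struct uart_port *port)
        __must_hold(&port->lock)
{
        /* TX state may be touched freely: the documented precondition
         * is machine-checked instead of comment-only. */
}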
/linux-master/drivers/tty/serial/8250/
8250_pci.c
128 spinlock_t lock; member in struct:f815xxa_data
1750 spin_lock_irqsave(&data->lock, flags);
1753 spin_unlock_irqrestore(&data->lock, flags);
1768 spin_lock_init(&data->lock);
8250_dw.c
130 * FIXME: this deadlocks if port->lock is already held
321 * deadlocks. First one is caused by a recursive mutex lock which
324 * the clk and tty-port mutexes lock. It happens if clock rate change
326 * tty-port mutex lock and clk_set_rate() function invocation and
503 spin_lock_init(&p->lock);
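The 8250_dw comments describe deadlocks when a clock-rate change is handled synchronously inside paths that already hold the clk or tty-port mutexes. A common way out, sketched below with illustrative names, is for the clk notifier to defer the divisor update to a workqueue so it never takes the tty-port mutex itself:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct example_uart {
        struct work_struct clk_work;
        struct notifier_block clk_notifier;
        unsigned long new_rate;
};

/* Runs in process context, outside the clk notifier chain, so taking
 * the tty-port mutex here cannot deadlock against clk_set_rate(). */
static void example_clk_work(struct work_struct *work)
{
        struct example_uart *up = container_of(work, struct example_uart, clk_work);

        /* take the tty-port mutex, reprogram divisors for up->new_rate ... */
}

static int example_clk_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
{
        struct example_uart *up = container_of(nb, struct example_uart, clk_notifier);
        struct clk_notifier_data *cnd = data;

        if (event == POST_RATE_CHANGE) {
                up->new_rate = cnd->new_rate;
                schedule_work(&up->clk_work);   /* defer, don't lock here */
        }
        return NOTIFY_OK;
}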
8250_lpc18xx.c
141 spin_lock_init(&uart.port.lock);
/linux-master/drivers/thunderbolt/
tb.c
52 * acquire the lock one more time). Used to drain wq
1497 mutex_lock(&tb->lock);
1501 mutex_unlock(&tb->lock);
2179 mutex_lock(&tb->lock);
2202 mutex_unlock(&tb->lock);
2209 mutex_unlock(&tb->lock);
2251 mutex_lock(&tb->lock);
2255 mutex_unlock(&tb->lock);
2278 mutex_lock(&tb->lock);
2371 mutex_unlock(&tb->lock);
[all...]
switch.c
312 if (!mutex_trylock(&sw->tb->lock)) {
318 mutex_unlock(&sw->tb->lock);
333 if (!mutex_trylock(&sw->tb->lock))
343 mutex_unlock(&sw->tb->lock);
852 * Domain tb->lock must be held when this function is called.
1785 if (!mutex_trylock(&sw->tb->lock))
1829 mutex_unlock(&sw->tb->lock);
1898 if (!mutex_trylock(&sw->tb->lock))
1906 mutex_unlock(&sw->tb->lock);
1923 if (!mutex_trylock(&sw->tb->lock))
[all...]
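The switch.c hits use mutex_trylock() rather than mutex_lock() because they sit in sysfs attribute handlers; instead of sleeping on a contended domain lock, the handler backs out and asks for the syscall to be restarted. A sketch of that idiom (device and field names are hypothetical):

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>  /* restart_syscall() */
#include <linux/sysfs.h>

struct example_sw {
        struct mutex domain_lock;
        int state;
};

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct example_sw *sw = dev_get_drvdata(dev);
        ssize_t ret;

        /* Don't sleep on the domain lock in sysfs; back out and let
         * the read() be transparently restarted. */
        if (!mutex_trylock(&sw->domain_lock))
                return restart_syscall();

        ret = sysfs_emit(buf, "%d\n", sw->state);
        mutex_unlock(&sw->domain_lock);
        return ret;
}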
/linux-master/drivers/misc/cardreader/
rtsx_pcr.c
317 spin_lock_irqsave(&pcr->lock, flags);
324 spin_unlock_irqrestore(&pcr->lock, flags);
349 spin_lock_irqsave(&pcr->lock, flags);
363 spin_unlock_irqrestore(&pcr->lock, flags);
374 spin_lock_irqsave(&pcr->lock, flags);
381 spin_unlock_irqrestore(&pcr->lock, flags);
384 spin_lock_irqsave(&pcr->lock, flags);
386 spin_unlock_irqrestore(&pcr->lock, flags);
493 spin_lock_irqsave(&pcr->lock, flags);
501 spin_unlock_irqrestore(&pcr->lock, flags);
[all...]
/linux-master/drivers/clk/mediatek/
clk-mtk.c
219 const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock)
240 mux->lock = lock;
263 gate->lock = lock;
279 div->lock = lock;
331 void __iomem *base, spinlock_t *lock,
349 hw = mtk_clk_register_composite(dev, mc, base, lock);
399 void __iomem *base, spinlock_t *lock,
218 mtk_clk_register_composite(struct device *dev, const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock) argument
329 mtk_clk_register_composites(struct device *dev, const struct mtk_composite *mcs, int num, void __iomem *base, spinlock_t *lock, struct clk_hw_onecell_data *clk_data) argument
397 mtk_clk_register_dividers(struct device *dev, const struct mtk_clk_divider *mcds, int num, void __iomem *base, spinlock_t *lock, struct clk_hw_onecell_data *clk_data) argument
[all...]
/linux-master/drivers/android/
binder.c
18 * 2) node->lock : protects most fields of binder_node.
29 * Any lock under procA must never be nested under any lock at the same
32 * Functions that require a lock held on entry indicate which lock
36 * foo_nlocked() : requires node->lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
263 * binder_proc_lock() - Acquire outer lock for given binder_proc
283 * Release lock acquired via binder_proc_lock()
296 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
[all...]
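binder.c spells out its lock hierarchy in a header comment and encodes "which lock must be held on entry" in function-name suffixes (_nlocked, _nilocked, and so on, per the hits above). Pairing that naming convention with lockdep_assert_held() makes the contract self-checking; a minimal sketch with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_node {
        spinlock_t lock;
        int local_refs;
};

/* "_nlocked" suffix: caller must already hold node->lock. */
static void example_node_inc_nlocked(struct example_node *node)
{
        lockdep_assert_held(&node->lock);
        node->local_refs++;
}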
/linux-master/block/
blk-iocost.c
418 spinlock_t lock; member in struct:ioc
733 spin_lock_irqsave(&iocg->ioc->lock, *flags);
734 spin_lock(&iocg->waitq.lock);
736 spin_lock_irqsave(&iocg->waitq.lock, *flags);
743 spin_unlock(&iocg->waitq.lock);
744 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
746 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
769 lockdep_assert_held(&ioc->lock);
918 lockdep_assert_held(&ioc->lock);
969 lockdep_assert_held(&ioc->lock);
[all...]
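The blk-iocost hits show a two-level nesting idiom: interrupts are disabled exactly once by the outermost spin_lock_irqsave(), so the inner lock is taken with plain spin_lock(), and the unlock path mirrors it in reverse order. A generic sketch (struct names are illustrative):

#include <linux/spinlock.h>

struct example_outer { spinlock_t lock; };
struct example_inner { spinlock_t lock; };

static void example_lock_both(struct example_outer *o,
                              struct example_inner *i,
                              unsigned long *flags)
{
        spin_lock_irqsave(&o->lock, *flags);    /* disables IRQs once */
        spin_lock(&i->lock);                    /* already IRQ-off here */
}

static void example_unlock_both(struct example_outer *o,
                                struct example_inner *i,
                                unsigned long *flags)
{
        spin_unlock(&i->lock);
        spin_unlock_irqrestore(&o->lock, *flags);
}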
/linux-master/arch/x86/kvm/
x86.c
2156 * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the
3170 read_lock_irqsave(&gpc->lock, flags);
3172 read_unlock_irqrestore(&gpc->lock, flags);
3177 read_lock_irqsave(&gpc->lock, flags);
3210 read_unlock_irqrestore(&gpc->lock, flags);
5119 * Take the srcu lock as memslots will be accessed to check the gfn
6375 spin_lock(&pic->lock);
6378 spin_unlock(&pic->lock);
6381 spin_lock(&pic->lock);
6384 spin_unlock(&pic->lock);
[all...]
pmu.c
1025 mutex_lock(&kvm->lock);
1027 mutex_is_locked(&kvm->lock));
1028 mutex_unlock(&kvm->lock);
/linux-master/arch/x86/kvm/svm/
sev.c
414 lockdep_assert_held(&kvm->lock);
427 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
972 /* lock userspace source and destination page */
1603 if (mutex_lock_killable(&dst_kvm->lock))
1605 if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
1610 mutex_unlock(&dst_kvm->lock);
1623 mutex_unlock(&dst_kvm->lock);
1624 mutex_unlock(&src_kvm->lock);
1878 mutex_lock(&kvm->lock);
1957 mutex_unlock(&kvm->lock);
[all...]
/linux-master/arch/powerpc/kernel/
iommu.c
254 spin_lock_irqsave(&(pool->lock), flags);
279 spin_unlock(&(pool->lock));
281 spin_lock(&(pool->lock));
300 spin_unlock(&(pool->lock));
303 spin_lock(&(pool->lock));
310 spin_unlock(&pool->lock);
312 spin_lock(&pool->lock);
319 spin_unlock_irqrestore(&(pool->lock), flags);
340 spin_unlock_irqrestore(&(pool->lock), flags);
452 spin_lock_irqsave(&(pool->lock), flags);
[all...]
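The iommu.c hits come from a multi-pool allocator: on failure it drops the current pool's lock with plain spin_unlock() and takes the next pool's with spin_lock(), so at most one pool lock is held while IRQs stay disabled from the initial irqsave to the final irqrestore. A rough sketch under those assumptions, with illustrative names:

#include <linux/spinlock.h>

#define EXAMPLE_NR_POOLS 4

struct example_pool {
        spinlock_t lock;
        /* free-space bitmap, allocation hint, ... */
};

static long example_alloc(struct example_pool *pools, unsigned int first)
{
        struct example_pool *pool = &pools[first];
        unsigned long flags;
        unsigned int tries;

        spin_lock_irqsave(&pool->lock, flags);
        for (tries = 0; tries < EXAMPLE_NR_POOLS; tries++) {
                /* ... try to allocate from this pool; break on success ... */

                /* Hand over to the next pool: never hold two pool locks,
                 * and keep IRQs off across the whole attempt. */
                spin_unlock(&pool->lock);
                pool = &pools[(first + tries + 1) % EXAMPLE_NR_POOLS];
                spin_lock(&pool->lock);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
        return -1;
}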
/linux-master/tools/perf/util/bpf_skel/
lock_contention.bpf.c
14 /* lock contention flags from include/trace/events/lock.h */
46 /* actual lock contention statistics */
133 /* determine the key of lock stat */
245 static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags) argument
251 struct mutex *mutex = (void *)lock;
262 struct rw_semaphore___old *rwsem = (void *)lock;
265 struct rw_semaphore___new *rwsem = (void *)lock;
270 struct rw_semaphore *rwsem = (void *)lock;
282 static inline __u32 check_lock_type(__u64 lock, __u32 flags) argument
529 raw_spinlock_t lock; member in struct:rq___old
[all...]
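lock_contention.bpf.c copes with kernel layout changes via CO-RE "struct flavors": the ___old/___new suffixes (stripped at relocation time) name candidate layouts, and bpf_core_type_matches() selects whichever matches the running kernel. A simplified sketch of the rw_semaphore owner lookup seen in the hits (flag bits in the owner word are glossed over):

#include <vmlinux.h>
#include <bpf/bpf_core_read.h>

struct rw_semaphore___old {
        struct task_struct *owner;
} __attribute__((preserve_access_index));

struct rw_semaphore___new {
        atomic_long_t owner;
} __attribute__((preserve_access_index));

static struct task_struct *example_rwsem_owner(__u64 lock)
{
        if (bpf_core_type_matches(struct rw_semaphore___old)) {
                struct rw_semaphore___old *rwsem = (void *)lock;

                return BPF_CORE_READ(rwsem, owner);
        } else if (bpf_core_type_matches(struct rw_semaphore___new)) {
                struct rw_semaphore___new *rwsem = (void *)lock;

                /* owner is a tagged pointer on newer kernels; real code
                 * must mask the low flag bits. */
                return (void *)BPF_CORE_READ(rwsem, owner.counter);
        }
        return NULL;
}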
/linux-master/tools/perf/util/
annotate.c
1902 mutex_lock(&dso->lock);
1910 mutex_unlock(&dso->lock);
3656 if (!strcmp(dl->ins.name, "lock"))
3730 * llvm-objdump places "lock" in a separate line and
3733 if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') {
/linux-master/net/unix/
af_unix.c
469 spin_lock(&u_other->peer_wait.lock);
478 spin_unlock(&u_other->peer_wait.lock);
489 spin_lock(&u_other->peer_wait.lock);
496 spin_unlock(&u_other->peer_wait.lock);
790 spin_lock(&sk->sk_receive_queue.lock);
797 spin_unlock(&sk->sk_receive_queue.lock);
815 * SOCK_DGRAM is ordinary. So, no lock is needed.
985 spin_lock_init(&u->lock);
987 mutex_init(&u->iolock); /* single task reading lock */
988 mutex_init(&u->bindlock); /* single task binding lock */
[all...]
/linux-master/net/sched/
sch_generic.c
66 * - enqueue, dequeue are serialized via qdisc root lock
67 * - ingress filtering is also serialized via qdisc root lock
76 spinlock_t *lock = NULL; local
80 lock = qdisc_lock(q);
81 spin_lock(lock);
86 /* check the reason of requeuing without tx lock first */
103 if (lock)
104 spin_unlock(lock);
122 spinlock_t *lock = NULL; local
125 lock
145 spinlock_t *lock = NULL; local
237 spinlock_t *lock = NULL; local
[all...]
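The repeated "spinlock_t *lock = NULL; local" hits are a conditional root-lock idiom: these helpers are called with the root lock already held for classic qdiscs, so only lockless (TCQ_F_NOLOCK) qdiscs take it locally, and the unlock is guarded on the pointer. A condensed sketch of the shape these helpers take:

#include <linux/spinlock.h>
#include <net/sch_generic.h>

static void example_touch_bad_txq(struct Qdisc *q)
{
        spinlock_t *lock = NULL;

        /* Classic qdiscs arrive with the root lock held by the caller;
         * TCQ_F_NOLOCK qdiscs must take it here to protect the queue. */
        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        /* ... manipulate q->skb_bad_txq / q->gso_skb ... */

        if (lock)
                spin_unlock(lock);
}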
/linux-master/net/netfilter/
nft_set_rbtree.c
20 rwlock_t lock; member in struct:nft_rbtree
128 read_lock_bh(&priv->lock);
131 read_unlock_bh(&priv->lock);
215 read_lock_bh(&priv->lock);
218 read_unlock_bh(&priv->lock);
230 lockdep_assert_held_write(&priv->lock);
500 write_lock_bh(&priv->lock);
504 write_unlock_bh(&priv->lock);
512 write_lock_bh(&priv->lock);
516 write_unlock_bh(&priv->lock);
[all...]
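nft_set_rbtree pairs read_lock_bh() on the datapath lookup with write_lock_bh() for control-plane insert/remove, and asserts the write side in helpers with lockdep_assert_held_write(). A condensed sketch with illustrative names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_set {
        rwlock_t lock;
        /* struct rb_root root; ... */
};

/* Datapath: many concurrent lookups, bottom halves disabled. */
static bool example_lookup(struct example_set *priv)
{
        bool found;

        read_lock_bh(&priv->lock);
        found = false;  /* ... walk the tree ... */
        read_unlock_bh(&priv->lock);
        return found;
}

/* Helper that must only run under the write lock. */
static void example_erase(struct example_set *priv)
{
        lockdep_assert_held_write(&priv->lock);
        /* ... rb_erase() ... */
}

static void example_remove(struct example_set *priv)
{
        write_lock_bh(&priv->lock);
        example_erase(priv);
        write_unlock_bh(&priv->lock);
}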
/linux-master/net/core/
dev.c
26 * Alan Cox : Fixed double lock.
44 * Alan Cox : Device lock protection.
49 * Dave Miller : 32bit quantity for the device lock to
204 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
212 spin_lock_irq(&sd->input_pkt_queue.lock);
221 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
229 spin_unlock_irq(&sd->input_pkt_queue.lock);
487 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, argument
493 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
507 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, argument
[all...]
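netdev_set_xmit_lockdep_class() in the dev.c hits gives each device type its own lock class via lockdep_set_class_and_name(), so lockdep can distinguish legitimately nested xmit locks of stacked devices (e.g. VLAN over Ethernet) from true recursion. A sketch of the mechanism with a toy two-entry table:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key example_xmit_keys[2];
static const char *const example_xmit_names[2] = {
        "_xmit_TYPE0",
        "_xmit_TYPE1",
};

/* Toy mapping: a real implementation indexes a table of ARPHRD_*
 * device types to pick i. */
static void example_set_xmit_class(spinlock_t *lock, unsigned short type)
{
        unsigned int i = type & 1;

        lockdep_set_class_and_name(lock, &example_xmit_keys[i],
                                   example_xmit_names[i]);
}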
/linux-master/net/bridge/
br_private.h
489 spinlock_t lock; member in struct:net_bridge
643 /* called under bridge lock */
1855 /* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
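The *_ONCE note refers to annotating lock-free readers: the writer publishes state with WRITE_ONCE() under the bridge lock, and fast-path readers use READ_ONCE() without taking it, which keeps the access single-copy atomic and stops compiler tearing. A tiny sketch with illustrative names:

#include <linux/compiler.h>
#include <linux/types.h>

struct example_vlan {
        u8 state;
};

/* Writer: runs under the bridge lock. */
static void example_vlan_set_state(struct example_vlan *v, u8 state)
{
        WRITE_ONCE(v->state, state);
}

/* Reader: datapath, no lock taken. */
static u8 example_vlan_get_state(const struct example_vlan *v)
{
        return READ_ONCE(v->state);
}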
/linux-master/mm/
shmem.c
371 * __shmem_file_setup, one of our callers, is lock-free: it
429 spin_lock(&info->lock);
445 spin_unlock(&info->lock);
690 * to lock the page at this time.
692 * Waiting for the lock may lead to deadlock in the
850 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
883 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
893 /* Be careful as we don't hold info->lock */
1706 spin_lock(&info->lock);
1711 spin_unlock(&info->lock);
2399 shmem_lock(struct file *file, int lock, struct ucounts *ucounts) argument
4790 shmem_lock(struct file *file, int lock, struct ucounts *ucounts) argument
[all...]
memory-failure.c
161 * optimization is enabled. This will break current lock dependency
562 * We already hold rcu lock in the caller, so we don't have to call
1335 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1606 * be called inside page lock (it's recommended but not enforced).
1632 * TTU_RMAP_LOCKED to indicate we have taken the lock
1640 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1756 * lock_page(), but dax pages do not use the page lock. This
2413 spinlock_t lock; member in struct:memory_failure_cpu
2445 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2451 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
[all...]
/linux-master/kernel/
fork.c
436 /* SLAB cache for vm_area_struct.lock */
445 init_rwsem(&vma->vm_lock->lock);
520 VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
894 * lock, to filter out false-positives in the cpumask.
1131 * the sighand lock in case orig has changed between now and
1753 spin_lock(&fs->lock);
1756 spin_unlock(&fs->lock);
1760 spin_unlock(&fs->lock);
1907 * Must be called with sighand->lock held, which is common to
1921 * sighand lock
[all...]

Completed in 566 milliseconds
