Lines matching refs:pid_list (kernel/trace/pid_list.c)

10 /* See pid_list.h for details */
12 static inline union lower_chunk *get_lower_chunk(struct trace_pid_list *pid_list)
16 lockdep_assert_held(&pid_list->lock);
18 if (!pid_list->lower_list)
21 chunk = pid_list->lower_list;
22 pid_list->lower_list = chunk->next;
23 pid_list->free_lower_chunks--;
24 WARN_ON_ONCE(pid_list->free_lower_chunks < 0);
30 if (pid_list->free_lower_chunks <= CHUNK_REALLOC)
31 irq_work_queue(&pid_list->refill_irqwork);
36 static inline union upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list)
40 lockdep_assert_held(&pid_list->lock);
42 if (!pid_list->upper_list)
45 chunk = pid_list->upper_list;
46 pid_list->upper_list = chunk->next;
47 pid_list->free_upper_chunks--;
48 WARN_ON_ONCE(pid_list->free_upper_chunks < 0);
54 if (pid_list->free_upper_chunks <= CHUNK_REALLOC)
55 irq_work_queue(&pid_list->refill_irqwork);
60 static inline void put_lower_chunk(struct trace_pid_list *pid_list,
63 lockdep_assert_held(&pid_list->lock);
65 chunk->next = pid_list->lower_list;
66 pid_list->lower_list = chunk;
67 pid_list->free_lower_chunks++;
70 static inline void put_upper_chunk(struct trace_pid_list *pid_list,
73 lockdep_assert_held(&pid_list->lock);
75 chunk->next = pid_list->upper_list;
76 pid_list->upper_list = chunk;
77 pid_list->free_upper_chunks++;
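
The four helpers above implement an intrusive singly-linked freelist: get_*_chunk() pops a pre-allocated chunk, put_*_chunk() pushes one back, both under pid_list->lock (hence the lockdep_assert_held() calls), and a low-water mark (CHUNK_REALLOC) queues refill_irqwork because allocating here, possibly under scheduler run-queue locks, is not allowed. A minimal sketch of the same pop/push pattern; the chunk layout, sizes and names are illustrative, not the kernel's:

	#include <stddef.h>

	/* Illustrative chunk: the first word doubles as the freelist link,
	 * mirroring how union lower_chunk/upper_chunk reuse their storage. */
	union chunk {
		union chunk *next;
		unsigned long data[64];
	};

	struct freelist {
		union chunk *head;
		int free;		/* chunks currently on the list */
	};

	static union chunk *freelist_get(struct freelist *fl)
	{
		union chunk *c = fl->head;

		if (!c)
			return NULL;	/* empty: caller must fail gracefully */
		fl->head = c->next;
		fl->free--;
		c->next = NULL;		/* chunk is about to hold real data */
		return c;
	}

	static void freelist_put(struct freelist *fl, union chunk *c)
	{
		c->next = fl->head;
		fl->head = c;
		fl->free++;
	}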
120 * @pid_list: The pid list to test
123 * Tests if @pid is set in the @pid_list. This is usually called
129 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid)
139 if (!pid_list)
145 raw_spin_lock_irqsave(&pid_list->lock, flags);
146 upper_chunk = pid_list->upper[upper1];
152 raw_spin_unlock_irqrestore(&pid_list->lock, flags);
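
Before the lock is taken, the pid is split into three indices: upper1 picks the slot in the top-level pid_list->upper[] array (line 146), upper2 picks a lower chunk within that upper chunk, and lower is the bit position inside the lower chunk's bitmap. A sketch of that decomposition; the bit widths here are assumptions for illustration, the real UPPER_BITS/LOWER_BITS constants live in pid_list.h:

	/* Assumed widths for illustration only; see pid_list.h for the real ones. */
	#define LOWER_BITS	8
	#define UPPER_BITS	11
	#define LOWER_MASK	((1U << LOWER_BITS) - 1)
	#define UPPER_MASK	((1U << UPPER_BITS) - 1)
	#define MAX_PID		(1U << (LOWER_BITS + 2 * UPPER_BITS))

	static int pid_split(unsigned int pid, unsigned int *upper1,
			     unsigned int *upper2, unsigned int *lower)
	{
		if (pid >= MAX_PID)
			return -1;	/* reject out-of-range pids */

		*upper1 = (pid >> (LOWER_BITS + UPPER_BITS)) & UPPER_MASK;
		*upper2 = (pid >> LOWER_BITS) & UPPER_MASK;
		*lower  = pid & LOWER_MASK;
		return 0;
	}

With the indices in hand, the lookup under the lock is two pointer hops and a test_bit(), so a NULL at either level means the pid was never set.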
159 * @pid_list: The pid list to add the @pid to.
162 * Adds @pid to @pid_list. This is usually done explicitly by a user
168 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid)
178 if (!pid_list)
184 raw_spin_lock_irqsave(&pid_list->lock, flags);
185 upper_chunk = pid_list->upper[upper1];
187 upper_chunk = get_upper_chunk(pid_list);
192 pid_list->upper[upper1] = upper_chunk;
196 lower_chunk = get_lower_chunk(pid_list);
206 raw_spin_unlock_irqrestore(&pid_list->lock, flags);
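
Between lines 185 and 206 the set path allocates on demand: if the upper1 slot is empty it takes an upper chunk from the freelist (line 187) and installs it (line 192), then does the same for the lower chunk (line 196) before setting the bit. Roughly, as a sketch rather than the verbatim source:

	if (!upper_chunk) {
		upper_chunk = get_upper_chunk(pid_list);
		if (!upper_chunk) {
			ret = -ENOMEM;	/* freelist exhausted */
			goto out;
		}
		pid_list->upper[upper1] = upper_chunk;
	}
	lower_chunk = upper_chunk->data[upper2];
	if (!lower_chunk) {
		lower_chunk = get_lower_chunk(pid_list);
		if (!lower_chunk) {
			ret = -ENOMEM;
			goto out;
		}
		upper_chunk->data[upper2] = lower_chunk;
	}
	set_bit(lower, lower_chunk->data);
	ret = 0;
out:
	raw_spin_unlock_irqrestore(&pid_list->lock, flags);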
212 * @pid_list: The pid list to remove the @pid from.
215 * Removes @pid from @pid_list. This is usually done explicitly by a user
221 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid)
230 if (!pid_list)
236 raw_spin_lock_irqsave(&pid_list->lock, flags);
237 upper_chunk = pid_list->upper[upper1];
249 put_lower_chunk(pid_list, lower_chunk);
252 put_upper_chunk(pid_list, upper_chunk);
253 pid_list->upper[upper1] = NULL;
257 raw_spin_unlock_irqrestore(&pid_list->lock, flags);
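
The clear path is the mirror image: after clearing the bit, a lower chunk with no bits left is returned to the freelist (line 249), and if that leaves the upper chunk holding no lower chunks, it too is recycled and its top-level slot reset (lines 252-253). A sketch of that reclamation, assuming hypothetical emptiness helpers that scan the chunk's words:

	clear_bit(lower, lower_chunk->data);

	if (lower_chunk_empty(lower_chunk)) {		/* hypothetical helper */
		put_lower_chunk(pid_list, lower_chunk);
		upper_chunk->data[upper2] = NULL;
		if (upper_chunk_empty(upper_chunk)) {	/* hypothetical helper */
			put_upper_chunk(pid_list, upper_chunk);
			pid_list->upper[upper1] = NULL;
		}
	}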
263 * @pid_list: The pid list to examine.
267 * Looks for the next consecutive pid that is in @pid_list starting
273 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
283 if (!pid_list)
289 raw_spin_lock_irqsave(&pid_list->lock, flags);
291 upper_chunk = pid_list->upper[upper1];
309 raw_spin_unlock_irqrestore(&pid_list->lock, flags);
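
Inside the lock, the search starts at the split position of @pid: the current lower chunk's bitmap is scanned forward, and on a miss the walk advances through the remaining upper2 slots and upper1 entries until a set bit turns up or the pid space is exhausted. The found indices are then recombined into a pid, the inverse of the split shown earlier. A sketch of that recombination, reusing the illustrative widths from the split sketch (pid_join is an assumed name for the inverse helper):

	static unsigned int pid_join(unsigned int upper1,
				     unsigned int upper2, unsigned int lower)
	{
		return (upper1 << (LOWER_BITS + UPPER_BITS)) |
		       (upper2 << LOWER_BITS) |
		       lower;
	}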
319 * @pid_list: The pid list to examine.
322 * Looks for the first pid that is set in @pid_list, and places it
327 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid)
329 return trace_pid_list_next(pid_list, 0, pid);
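
trace_pid_list_first() is just the walk started at pid 0 (line 329). Together the pair supports a simple iteration idiom; this usage sketch assumes a negative return once no further pid exists, which is how the ftrace callers treat it:

	unsigned int pid;
	int ret;

	for (ret = trace_pid_list_first(pid_list, &pid); ret >= 0;
	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
		pr_info("pid %u is in the filter\n", pid);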
334 struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list, refill_irqwork);
346 raw_spin_lock(&pid_list->lock);
347 upper_count = CHUNK_ALLOC - pid_list->free_upper_chunks;
348 lower_count = CHUNK_ALLOC - pid_list->free_lower_chunks;
349 raw_spin_unlock(&pid_list->lock);
376 raw_spin_lock(&pid_list->lock);
378 *upper_next = pid_list->upper_list;
379 pid_list->upper_list = upper;
380 pid_list->free_upper_chunks += ucnt;
383 *lower_next = pid_list->lower_list;
384 pid_list->lower_list = lower;
385 pid_list->free_lower_chunks += lcnt;
387 raw_spin_unlock(&pid_list->lock);
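
pid_list_refill_irq() exists because the get helpers can run with scheduler run-queue locks held, where no allocation may happen; the irq_work defers it to a safe context. The pattern visible above: read the deficit under the lock (lines 346-349), build a private chain of freshly allocated chunks with the lock dropped, then splice the whole chain onto the freelist in one short critical section (lines 376-387). A compressed sketch of the lower-chunk half; note the pointer-to-pointer tail that keeps the chain appendable in order:

	union lower_chunk *lower = NULL;		/* head of private chain */
	union lower_chunk **lower_next = &lower;	/* where to append next */
	int lcnt = 0;

	while (lower_count-- > 0) {
		/* GFP_NOWAIT is this sketch's assumption: irq_work
		 * callbacks must not sleep. */
		union lower_chunk *chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);

		if (!chunk)
			break;
		*lower_next = chunk;
		lower_next = &chunk->next;
		lcnt++;
	}

	raw_spin_lock(&pid_list->lock);
	if (lower) {
		*lower_next = pid_list->lower_list;	/* line 383 */
		pid_list->lower_list = lower;		/* line 384 */
		pid_list->free_lower_chunks += lcnt;	/* line 385 */
	}
	raw_spin_unlock(&pid_list->lock);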
405 * trace_pid_list_alloc - create a new pid_list
407 * Allocates a new pid_list to store pids into.
409 * Returns the pid_list on success, NULL otherwise.
413 struct trace_pid_list *pid_list;
419 pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL);
420 if (!pid_list)
423 init_irq_work(&pid_list->refill_irqwork, pid_list_refill_irq);
425 raw_spin_lock_init(&pid_list->lock);
433 chunk->next = pid_list->upper_list;
434 pid_list->upper_list = chunk;
435 pid_list->free_upper_chunks++;
444 chunk->next = pid_list->lower_list;
445 pid_list->lower_list = chunk;
446 pid_list->free_lower_chunks++;
449 return pid_list;
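
trace_pid_list_alloc() runs in process context, so it can pre-populate both freelists with sleeping allocations (the pushes at lines 433-435 and 444-446); the first trace_pid_list_set() calls then never allocate in atomic context. The elided loop around those lines looks roughly like this sketch, with CHUNK_ALLOC as the batch size also seen at lines 347-348:

	for (i = 0; i < CHUNK_ALLOC; i++) {
		union upper_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			break;			/* a partial pre-fill is tolerated */
		chunk->next = pid_list->upper_list;
		pid_list->upper_list = chunk;
		pid_list->free_upper_chunks++;
	}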
453 * trace_pid_list_free - Frees an allocated pid_list.
455 * Frees the memory for a pid_list that was allocated.
457 void trace_pid_list_free(struct trace_pid_list *pid_list)
463 if (!pid_list)
466 irq_work_sync(&pid_list->refill_irqwork);
468 while (pid_list->lower_list) {
471 chunk = pid_list->lower_list;
472 pid_list->lower_list = pid_list->lower_list->next;
476 while (pid_list->upper_list) {
479 chunk = pid_list->upper_list;
480 pid_list->upper_list = pid_list->upper_list->next;
485 upper = pid_list->upper[i];
494 kfree(pid_list);
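
Teardown first quiesces the refill with irq_work_sync() (line 466) so no new chunks appear mid-free, then drains both freelists and finally walks pid_list->upper[] to release chunks still holding pids. From a caller's point of view, the whole lifecycle built from the functions listed in this file looks like this sketch (error handling abbreviated):

	struct trace_pid_list *pid_list;

	pid_list = trace_pid_list_alloc();
	if (!pid_list)
		return -ENOMEM;

	if (trace_pid_list_set(pid_list, 1234) < 0)
		goto out;			/* freelists exhausted */

	if (trace_pid_list_is_set(pid_list, 1234))
		pr_info("pid 1234 is in the filter\n");

	trace_pid_list_clear(pid_list, 1234);
out:
	trace_pid_list_free(pid_list);
	return 0;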