Lines matching refs:msq in ipc/msg.c

131 struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
133 security_msg_queue_free(&msq->q_perm);
134 kfree(msq);
146 struct msg_queue *msq;
151 msq = kmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
152 if (unlikely(!msq))
155 msq->q_perm.mode = msgflg & S_IRWXUGO;
156 msq->q_perm.key = key;
158 msq->q_perm.security = NULL;
159 retval = security_msg_queue_alloc(&msq->q_perm);
161 kfree(msq);
165 msq->q_stime = msq->q_rtime = 0;
166 msq->q_ctime = ktime_get_real_seconds();
167 msq->q_cbytes = msq->q_qnum = 0;
168 msq->q_qbytes = ns->msg_ctlmnb;
169 msq->q_lspid = msq->q_lrpid = NULL;
170 INIT_LIST_HEAD(&msq->q_messages);
171 INIT_LIST_HEAD(&msq->q_receivers);
172 INIT_LIST_HEAD(&msq->q_senders);
174 /* ipc_addid() locks msq upon success. */
175 retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
177 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
181 ipc_unlock_object(&msq->q_perm);
184 return msq->q_perm.id;
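
The matches at 146-184 above are the allocation path, newque(): it allocates the msg_queue, applies msgflg & S_IRWXUGO to q_perm.mode, runs the LSM hook, registers the id and returns q_perm.id. A minimal user-space sketch of what reaches this path, assuming the standard SysV API from <sys/ipc.h>/<sys/msg.h>:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
    /* Creating a new queue reaches newque(); 0600 becomes q_perm.mode. */
    int msqid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

    if (msqid < 0) {
        perror("msgget");
        return 1;
    }
    printf("created queue id %d\n", msqid);

    /* Clean up so the example leaves no stale queue behind. */
    if (msgctl(msqid, IPC_RMID, NULL) < 0)
        perror("msgctl(IPC_RMID)");
    return 0;
}
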
187 static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
189 return msgsz + msq->q_cbytes <= msq->q_qbytes &&
190 1 + msq->q_qnum <= msq->q_qbytes;
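
msg_fits_inqueue() applies two limits: the new message's bytes must fit within q_qbytes, and the message count itself is also capped at q_qbytes, which bounds queues filled with zero-length messages. A restatement in plain C, with illustrative (non-kernel) names:

/* Illustrative restatement of the check above; not kernel API. */
static int queue_has_room(size_t msgsz, size_t cbytes,
                          size_t qnum, size_t qbytes)
{
    return msgsz + cbytes <= qbytes &&  /* byte limit */
           1 + qnum <= qbytes;          /* message-count limit */
}
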
193 static inline void ss_add(struct msg_queue *msq,
203 list_add_tail(&mss->list, &msq->q_senders);
212 static void ss_wakeup(struct msg_queue *msq,
217 struct list_head *h = &msq->q_senders;
237 else if (!msg_fits_inqueue(msq, mss->msgsz)) {
241 list_move_tail(&mss->list, &msq->q_senders);
249 static void expunge_all(struct msg_queue *msq, int res,
254 list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
275 __releases(&msq->q_perm)
278 struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
281 expunge_all(msq, -EIDRM, &wake_q);
282 ss_wakeup(msq, &wake_q, true);
283 msg_rmid(ns, msq);
284 ipc_unlock_object(&msq->q_perm);
288 list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
292 percpu_counter_sub_local(&ns->percpu_msg_bytes, msq->q_cbytes);
293 ipc_update_pid(&msq->q_lspid, NULL);
294 ipc_update_pid(&msq->q_lrpid, NULL);
295 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
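
Lines 275-295 are the teardown path, freeque(): waiting receivers are woken with -EIDRM, sleeping senders are woken, the id is removed, the queued messages are freed and the cached byte count is subtracted before the final reference is dropped. From user space this is driven by IPC_RMID; a minimal sketch, assuming the standard API:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

/*
 * Remove a queue. Tasks blocked in msgrcv()/msgsnd() on it fail with
 * errno == EIDRM, matching expunge_all(msq, -EIDRM, ...) and ss_wakeup()
 * in the lines above.
 */
int remove_queue(int msqid)
{
    if (msgctl(msqid, IPC_RMID, NULL) < 0) {
        perror("msgctl(IPC_RMID)");
        return -1;
    }
    return 0;
}
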
405 struct msg_queue *msq;
418 msq = container_of(ipcp, struct msg_queue, q_perm);
420 err = security_msg_queue_msgctl(&msq->q_perm, cmd);
426 ipc_lock_object(&msq->q_perm);
440 ipc_lock_object(&msq->q_perm);
445 msq->q_qbytes = msg_qbytes;
447 msq->q_ctime = ktime_get_real_seconds();
452 expunge_all(msq, -EAGAIN, &wake_q);
457 ss_wakeup(msq, &wake_q, false);
458 ipc_unlock_object(&msq->q_perm);
469 ipc_unlock_object(&msq->q_perm);
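
Lines 405-469 are msgctl_down() handling IPC_SET under the object lock: q_qbytes and q_ctime are updated, waiting receivers are expunged with -EAGAIN (stricter permissions may now exclude them) and sleeping senders are re-checked (a larger limit may let them proceed). A user-space sketch, assuming struct msqid_ds; raising the limit past the system-wide maximum normally needs CAP_SYS_RESOURCE:

#include <sys/ipc.h>
#include <sys/msg.h>

/* Adjust a queue's byte limit; IPC_STAT first so the permission fields
 * passed back through IPC_SET are preserved. */
int set_qbytes(int msqid, unsigned long qbytes)
{
    struct msqid_ds ds;

    if (msgctl(msqid, IPC_STAT, &ds) < 0)
        return -1;
    ds.msg_qbytes = qbytes;
    return msgctl(msqid, IPC_SET, &ds);
}
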
521 struct msg_queue *msq;
528 msq = msq_obtain_object(ns, msqid);
529 if (IS_ERR(msq)) {
530 err = PTR_ERR(msq);
534 msq = msq_obtain_object_check(ns, msqid);
535 if (IS_ERR(msq)) {
536 err = PTR_ERR(msq);
543 audit_ipc_obj(&msq->q_perm);
546 if (ipcperms(ns, &msq->q_perm, S_IRUGO))
550 err = security_msg_queue_msgctl(&msq->q_perm, cmd);
554 ipc_lock_object(&msq->q_perm);
556 if (!ipc_valid_object(&msq->q_perm)) {
557 ipc_unlock_object(&msq->q_perm);
562 kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm);
563 p->msg_stime = msq->q_stime;
564 p->msg_rtime = msq->q_rtime;
565 p->msg_ctime = msq->q_ctime;
567 p->msg_stime_high = msq->q_stime >> 32;
568 p->msg_rtime_high = msq->q_rtime >> 32;
569 p->msg_ctime_high = msq->q_ctime >> 32;
571 p->msg_cbytes = msq->q_cbytes;
572 p->msg_qnum = msq->q_qnum;
573 p->msg_qbytes = msq->q_qbytes;
574 p->msg_lspid = pid_vnr(msq->q_lspid);
575 p->msg_lrpid = pid_vnr(msq->q_lrpid);
588 err = msq->q_perm.id;
591 ipc_unlock_object(&msq->q_perm);
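
Lines 521-591 are the stat path: the ipc permissions, the three timestamps, q_cbytes/q_qnum/q_qbytes and the last sender/receiver PIDs are copied out, and the index-based variant returns q_perm.id. The same fields surface in user space as struct msqid_ds; a minimal sketch, assuming the standard API:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

/* Print the per-queue counters that the stat path above fills in. */
int show_queue(int msqid)
{
    struct msqid_ds ds;

    if (msgctl(msqid, IPC_STAT, &ds) < 0) {
        perror("msgctl(IPC_STAT)");
        return -1;
    }
    printf("bytes=%lu msgs=%lu limit=%lu lspid=%d lrpid=%d\n",
           (unsigned long)ds.msg_cbytes, (unsigned long)ds.msg_qnum,
           (unsigned long)ds.msg_qbytes, (int)ds.msg_lspid,
           (int)ds.msg_lrpid);
    return 0;
}
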
816 static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
821 list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
823 !security_msg_queue_msgrcv(&msq->q_perm, msg, msr->r_tsk,
833 ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk));
834 msq->q_rtime = ktime_get_real_seconds();
851 struct msg_queue *msq;
872 msq = msq_obtain_object_check(ns, msqid);
873 if (IS_ERR(msq)) {
874 err = PTR_ERR(msq);
878 ipc_lock_object(&msq->q_perm);
884 if (ipcperms(ns, &msq->q_perm, S_IWUGO))
888 if (!ipc_valid_object(&msq->q_perm)) {
893 err = security_msg_queue_msgsnd(&msq->q_perm, msg, msgflg);
897 if (msg_fits_inqueue(msq, msgsz))
907 ss_add(msq, &s, msgsz);
909 if (!ipc_rcu_getref(&msq->q_perm)) {
914 ipc_unlock_object(&msq->q_perm);
919 ipc_lock_object(&msq->q_perm);
921 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
923 if (!ipc_valid_object(&msq->q_perm)) {
936 ipc_update_pid(&msq->q_lspid, task_tgid(current));
937 msq->q_stime = ktime_get_real_seconds();
939 if (!pipelined_send(msq, msg, &wake_q)) {
941 list_add_tail(&msg->m_list, &msq->q_messages);
942 msq->q_cbytes += msgsz;
943 msq->q_qnum++;
952 ipc_unlock_object(&msq->q_perm);
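
Lines 816-952 are the send path: pipelined_send() hands the message straight to a matching sleeping receiver, updating q_lrpid and q_rtime; otherwise the message is appended to q_messages and q_cbytes/q_qnum are bumped, with the sender sleeping on q_senders (or failing with EAGAIN under IPC_NOWAIT) while msg_fits_inqueue() is false. A minimal user-space counterpart, assuming the usual mtype-prefixed layout; the struct and function names here are illustrative:

#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

/* SysV messages start with a positive long type followed by the payload;
 * the msgsz argument counts only the payload bytes. */
struct msgbuf_example {
    long mtype;
    char mtext[64];
};

int send_text(int msqid, long type, const char *text)
{
    struct msgbuf_example m = { .mtype = type };

    strncpy(m.mtext, text, sizeof(m.mtext) - 1);
    /* Blocks while the queue is full; pass IPC_NOWAIT to get EAGAIN. */
    return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}
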
1074 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
1079 list_for_each_entry(msg, &msq->q_messages, m_list) {
1081 !security_msg_queue_msgrcv(&msq->q_perm, msg, current,
1102 struct msg_queue *msq;
1122 msq = msq_obtain_object_check(ns, msqid);
1123 if (IS_ERR(msq)) {
1126 return PTR_ERR(msq);
1133 if (ipcperms(ns, &msq->q_perm, S_IRUGO))
1136 ipc_lock_object(&msq->q_perm);
1139 if (!ipc_valid_object(&msq->q_perm)) {
1144 msg = find_msg(msq, &msgtyp, mode);
1164 msq->q_qnum--;
1165 msq->q_rtime = ktime_get_real_seconds();
1166 ipc_update_pid(&msq->q_lrpid, task_tgid(current));
1167 msq->q_cbytes -= msg->m_ts;
1170 ss_wakeup(msq, &wake_q, false);
1181 list_add_tail(&msr_d.r_list, &msq->q_receivers);
1196 ipc_unlock_object(&msq->q_perm);
1205 * msq:
1233 ipc_lock_object(&msq->q_perm);
1245 ipc_unlock_object(&msq->q_perm);
1249 ipc_unlock_object(&msq->q_perm);
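
Lines 1074-1249 are the receive path: find_msg() walks q_messages applying the msgtyp selection mode, and the caller either dequeues a message (updating q_qnum, q_cbytes, q_rtime and q_lrpid, then re-waking senders) or is parked on q_receivers. A user-space counterpart, assuming the same illustrative message layout as the send sketch:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/types.h>

struct msgbuf_example {
    long mtype;
    char mtext[64];
};

/* msgtyp == 0: first message; > 0: first of that type; < 0: lowest type
 * not greater than |msgtyp|. These are the modes find_msg() implements. */
ssize_t recv_text(int msqid, long msgtyp)
{
    struct msgbuf_example m;
    ssize_t n = msgrcv(msqid, &m, sizeof(m.mtext), msgtyp, 0);

    if (n >= 0)
        printf("type %ld: %.*s\n", m.mtype, (int)n, m.mtext);
    return n;
}
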
1346 struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
1350 msq->q_perm.key,
1351 msq->q_perm.id,
1352 msq->q_perm.mode,
1353 msq->q_cbytes,
1354 msq->q_qnum,
1355 pid_nr_ns(msq->q_lspid, pid_ns),
1356 pid_nr_ns(msq->q_lrpid, pid_ns),
1357 from_kuid_munged(user_ns, msq->q_perm.uid),
1358 from_kgid_munged(user_ns, msq->q_perm.gid),
1359 from_kuid_munged(user_ns, msq->q_perm.cuid),
1360 from_kgid_munged(user_ns, msq->q_perm.cgid),
1361 msq->q_stime,
1362 msq->q_rtime,
1363 msq->q_ctime);
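
Lines 1346-1363 are sysvipc_msg_proc_show(), which prints one row per queue to /proc/sysvipc/msg in the order shown: key, msqid, perms, cbytes, qnum, lspid, lrpid, uid, gid, cuid, cgid, stime, rtime, ctime. A minimal reader, assuming the proc file is available at its usual path:

#include <stdio.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/sysvipc/msg", "r");

    if (!f) {
        perror("/proc/sysvipc/msg");
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}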