Lines Matching defs:wmi

91 struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
93 struct wmi *wmi;
95 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
96 if (!wmi)
99 wmi->drv_priv = priv;
100 wmi->stopped = false;
101 skb_queue_head_init(&wmi->wmi_event_queue);
102 spin_lock_init(&wmi->wmi_lock);
103 spin_lock_init(&wmi->event_lock);
104 mutex_init(&wmi->op_mutex);
105 mutex_init(&wmi->multi_write_mutex);
106 mutex_init(&wmi->multi_rmw_mutex);
107 init_completion(&wmi->cmd_wait);
108 INIT_LIST_HEAD(&wmi->pending_tx_events);
109 tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);
111 return wmi;
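
Taken together, the initialization calls above imply roughly the following layout for struct wmi. This is a partial sketch reconstructed from this listing only (field types are educated guesses); the authoritative definition lives in wmi.h and contains further members:

/* Partial sketch of struct wmi, inferred from ath9k_init_wmi() and the
 * accesses shown further down; see wmi.h for the real definition. */
struct wmi {
        struct ath9k_htc_priv *drv_priv;        /* back-pointer to the HTC driver state */
        struct htc_target *htc;                 /* set in ath9k_wmi_connect() */
        enum htc_endpoint_id ctrl_epid;         /* WMI control endpoint */

        struct mutex op_mutex;                  /* serializes command issue + wait */
        struct mutex multi_write_mutex;
        struct mutex multi_rmw_mutex;
        struct completion cmd_wait;             /* completed by the response callback */

        u16 tx_seq_id;                          /* sequence number of the last command sent */
        u16 last_seq_id;                        /* sequence number a response must match */
        u8 *cmd_rsp_buf;                        /* caller-supplied response buffer */
        u32 cmd_rsp_len;

        struct sk_buff_head wmi_event_queue;    /* events deferred to the tasklet */
        struct tasklet_struct wmi_event_tasklet;
        struct list_head pending_tx_events;
        spinlock_t wmi_lock;                    /* protects the queue and seq/rsp fields */
        spinlock_t event_lock;

        bool stopped;                           /* set on shutdown, checked on RX/issue */
};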
116 struct wmi *wmi = priv->wmi;
118 mutex_lock(&wmi->op_mutex);
119 wmi->stopped = true;
120 mutex_unlock(&wmi->op_mutex);
125 kfree(priv->wmi);
132 tasklet_kill(&priv->wmi->wmi_event_tasklet);
133 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
134 __skb_queue_purge(&priv->wmi->wmi_event_queue);
135 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
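
Teardown is split across two helpers in the lines above: one marks the interface stopped under op_mutex and frees the structure (lines 116-125), the other kills the event tasklet and purges any queued event skbs under wmi_lock (lines 132-135). A hypothetical helper combining the visible steps, to make the ordering explicit:

/* Hypothetical helper combining the shutdown steps visible above; in the
 * driver they are split across two functions. */
static void wmi_shutdown_sketch(struct ath9k_htc_priv *priv)
{
        struct wmi *wmi = priv->wmi;
        unsigned long flags;

        mutex_lock(&wmi->op_mutex);
        wmi->stopped = true;                    /* refuse further commands */
        mutex_unlock(&wmi->op_mutex);

        tasklet_kill(&wmi->wmi_event_tasklet);  /* let a running tasklet finish, then disable it */

        spin_lock_irqsave(&wmi->wmi_lock, flags);
        __skb_queue_purge(&wmi->wmi_event_queue);       /* free any still-queued event skbs */
        spin_unlock_irqrestore(&wmi->wmi_lock, flags);

        kfree(priv->wmi);
}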
140 struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
141 struct ath9k_htc_priv *priv = wmi->drv_priv;
150 spin_lock_irqsave(&wmi->wmi_lock, flags);
151 skb = __skb_dequeue(&wmi->wmi_event_queue);
153 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
156 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
174 ieee80211_queue_work(wmi->drv_priv->hw,
175 &wmi->drv_priv->fatal_work);
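
The event tasklet drains wmi_event_queue one skb at a time, holding wmi_lock only around the dequeue so the per-event handling runs unlocked; a fatal firmware event is bounced to process context through the driver's fatal_work. A sketch of that drain loop (the per-event parsing in the elided lines is reduced to a comment):

/* Sketch of the event tasklet's drain loop. */
static void wmi_event_tasklet_sketch(struct tasklet_struct *t)
{
        struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
        struct sk_buff *skb;
        unsigned long flags;

        do {
                spin_lock_irqsave(&wmi->wmi_lock, flags);
                skb = __skb_dequeue(&wmi->wmi_event_queue);
                if (!skb) {
                        spin_unlock_irqrestore(&wmi->wmi_lock, flags);
                        return;                 /* queue drained */
                }
                spin_unlock_irqrestore(&wmi->wmi_lock, flags);

                /* ... event id parsing and dispatch happen here; a fatal
                 * firmware event, for instance, ends up as
                 *      ieee80211_queue_work(wmi->drv_priv->hw,
                 *                           &wmi->drv_priv->fatal_work);
                 */
                kfree_skb(skb);
        } while (1);
}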
205 static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
209 if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
210 memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
212 complete(&wmi->cmd_wait);
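
The response callback itself is short: whatever buffer the issuer registered as cmd_rsp_buf receives the payload, and the waiter blocked in ath9k_wmi_cmd() is woken through cmd_wait. Completed as a sketch (the header strip before the copy is an assumption; the rest mirrors the lines above):

static void wmi_rsp_callback_sketch(struct wmi *wmi, struct sk_buff *skb)
{
        /* Assumption: the WMI command header is stripped before the copy. */
        skb_pull(skb, sizeof(struct wmi_cmd_hdr));

        if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
                memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);

        complete(&wmi->cmd_wait);               /* wake the waiter in ath9k_wmi_cmd() */
}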
218 struct wmi *wmi = priv;
223 if (unlikely(wmi->stopped))
234 spin_lock_irqsave(&wmi->wmi_lock, flags);
235 __skb_queue_tail(&wmi->wmi_event_queue, skb);
236 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
237 tasklet_schedule(&wmi->wmi_event_tasklet);
242 spin_lock_irqsave(&wmi->wmi_lock, flags);
243 if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
244 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
249 ath9k_wmi_rsp_callback(wmi, skb);
250 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
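
ath9k_wmi_ctrl_rx() is the RX hook for the control endpoint (registered through ep_callbacks below): once stopped is set it drops everything, real WMI events are queued and deferred to the tasklet, and a packet whose seq_no matches last_seq_id is treated as the response to the outstanding command. The event-versus-response decision comes from the WMI header, which this listing does not show, so the sketch below takes it as a parameter; the callback's real signature is also abridged:

static void wmi_ctrl_rx_sketch(void *priv, struct sk_buff *skb, bool is_event)
{
        struct wmi *wmi = priv;
        struct wmi_cmd_hdr *hdr = (struct wmi_cmd_hdr *)skb->data;
        unsigned long flags;

        if (unlikely(wmi->stopped))
                goto free_skb;                  /* interface is shutting down */

        if (is_event) {
                /* Defer event processing to the tasklet. */
                spin_lock_irqsave(&wmi->wmi_lock, flags);
                __skb_queue_tail(&wmi->wmi_event_queue, skb);
                spin_unlock_irqrestore(&wmi->wmi_lock, flags);
                tasklet_schedule(&wmi->wmi_event_tasklet);
                return;
        }

        /* Otherwise this should be the response to the command currently in
         * flight: it must carry the sequence number recorded at issue time. */
        spin_lock_irqsave(&wmi->wmi_lock, flags);
        if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
                spin_unlock_irqrestore(&wmi->wmi_lock, flags);
                goto free_skb;                  /* stale or unexpected response */
        }
        ath9k_wmi_rsp_callback(wmi, skb);       /* copies payload, completes cmd_wait */
        spin_unlock_irqrestore(&wmi->wmi_lock, flags);

free_skb:
        kfree_skb(skb);
}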
262 int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
268 wmi->htc = htc;
272 connect.ep_callbacks.priv = wmi;
277 ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
281 *wmi_ctrl_epid = wmi->ctrl_epid;
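
ath9k_wmi_connect() registers the WMI control service with the HTC layer. The connect request's ep_callbacks.priv points back at the wmi instance, which is how the RX hook above receives it as its private pointer, and the endpoint id returned by htc_connect_service() is cached in wmi->ctrl_epid for later sends. A condensed sketch (the type of the connect request and its remaining fields are not taken from this listing):

int wmi_connect_sketch(struct htc_target *htc, struct wmi *wmi,
                       enum htc_endpoint_id *wmi_ctrl_epid)
{
        struct htc_service_connreq connect;     /* type name assumed */
        int ret;

        wmi->htc = htc;

        memset(&connect, 0, sizeof(connect));
        connect.ep_callbacks.priv = wmi;        /* handed back to the RX hook */
        /* ... service id and the RX callback pointer are filled in here ... */

        ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
        if (ret)
                return ret;

        *wmi_ctrl_epid = wmi->ctrl_epid;
        return 0;
}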
286 static int ath9k_wmi_cmd_issue(struct wmi *wmi,
296 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
298 spin_lock_irqsave(&wmi->wmi_lock, flags);
301 wmi->cmd_rsp_buf = rsp_buf;
302 wmi->cmd_rsp_len = rsp_len;
304 wmi->last_seq_id = wmi->tx_seq_id;
305 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
307 return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
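
ath9k_wmi_cmd_issue() stamps the outgoing command with the next sequence number and, under wmi_lock, records where the response should be copied and which sequence number to expect before handing the skb to HTC. Doing that bookkeeping inside the same lock the RX path takes is what lets ath9k_wmi_ctrl_rx() discard stale responses safely. A sketch of the issue step (the header push and the command-id field name are assumptions):

static int wmi_cmd_issue_sketch(struct wmi *wmi, struct sk_buff *skb,
                                enum wmi_cmd_id cmd_id, u32 cmd_len,
                                u8 *rsp_buf, u32 rsp_len)
{
        struct wmi_cmd_hdr *hdr;
        unsigned long flags;

        hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));        /* assumed */
        hdr->command_id = cpu_to_be16(cmd_id);                  /* field name assumed */
        hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

        spin_lock_irqsave(&wmi->wmi_lock, flags);
        wmi->cmd_rsp_buf = rsp_buf;             /* where the RX path copies the reply */
        wmi->cmd_rsp_len = rsp_len;
        wmi->last_seq_id = wmi->tx_seq_id;      /* the reply must carry this seq_no */
        spin_unlock_irqrestore(&wmi->wmi_lock, flags);

        return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}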
310 int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
315 struct ath_hw *ah = wmi->drv_priv->ah;
336 mutex_lock(&wmi->op_mutex);
338 /* check if wmi stopped flag is set */
339 if (unlikely(wmi->stopped)) {
344 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
348 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
352 spin_lock_irqsave(&wmi->wmi_lock, flags);
353 wmi->last_seq_id = 0;
354 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
355 mutex_unlock(&wmi->op_mutex);
359 mutex_unlock(&wmi->op_mutex);
365 mutex_unlock(&wmi->op_mutex);
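
ath9k_wmi_cmd() is the synchronous entry point: it takes op_mutex, refuses to run once stopped is set, issues the command, and sleeps on cmd_wait with the caller-supplied timeout. If the wait times out it zeroes last_seq_id under wmi_lock, so a late response is ignored instead of being copied into a buffer the caller may have abandoned. A hedged caller example follows; the payload types, the command id, and the exact ordering of the buffer/length arguments (elided in this listing) are assumptions, and the timeout is just an example value in jiffies:

/* Illustrative caller only; struct example_cmd, struct example_cmd_rsp and
 * the argument order after cmd_id are assumed from the variables used in the
 * lines above, not confirmed by this listing. */
struct example_cmd { __be32 param; };
struct example_cmd_rsp { __be32 status; };

static int example_send_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
{
        struct example_cmd cmd = { .param = cpu_to_be32(1) };
        struct example_cmd_rsp rsp;
        int ret;

        ret = ath9k_wmi_cmd(wmi, cmd_id,
                            (u8 *)&cmd, sizeof(cmd),    /* command payload */
                            (u8 *)&rsp, sizeof(rsp),    /* response buffer */
                            HZ);                        /* wait up to ~1s (jiffies) */
        if (ret)
                return ret;                             /* timeout or WMI stopped */

        /* rsp now holds the firmware's reply, copied in by the response
         * callback before cmd_wait was completed. */
        return 0;
}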