in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/sfc/

Lines Matching refs:mcdi

The matches below trace the life cycle of an MCDI (Management Controller Driver Interface) request: interface initialisation (source lines 50-64), request copy-in (72-84), polled completion (109-204), the ownership state machine (235-291), event completion (297-320), the blocking RPC wrapper (329-389), completion-mode switches (395-438), and firmware-death handling (443-471).

50 	return &nic_data->mcdi;
55 struct efx_mcdi_iface *mcdi;
60 mcdi = efx_mcdi(efx);
61 init_waitqueue_head(&mcdi->wq);
62 spin_lock_init(&mcdi->iface_lock);
63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
64 mcdi->mode = MCDI_MODE_POLL;
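
The matches at source lines 50-64 show the per-NIC MCDI state being looked up and initialised. A minimal reconstruction of the structure and its init path, inferred only from the fields these fragments touch; the enum orderings, field types, and the efx_mcdi_init_iface name are assumptions, not verbatim from this file:

	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <asm/atomic.h>

	enum efx_mcdi_state {		/* values and order assumed */
		MCDI_STATE_QUIESCENT,	/* interface is free */
		MCDI_STATE_RUNNING,	/* a request owns the interface */
		MCDI_STATE_COMPLETED,	/* response ready, owner not yet done */
	};

	enum efx_mcdi_mode { MCDI_MODE_POLL, MCDI_MODE_EVENTS };

	struct efx_mcdi_iface {		/* layout reconstructed, not verbatim */
		atomic_t state;
		wait_queue_head_t wq;	/* woken on state transitions */
		spinlock_t iface_lock;	/* guards seqno/credits/resprc/resplen */
		enum efx_mcdi_mode mode;
		unsigned int credits;	/* stale completions left to swallow */
		unsigned int seqno;	/* request sequence number */
		unsigned int resprc;	/* MCDI error code of last response */
		size_t resplen;		/* payload length of last response */
	};

	static void efx_mcdi_init_iface(struct efx_mcdi_iface *mcdi)
	{
		init_waitqueue_head(&mcdi->wq);
		spin_lock_init(&mcdi->iface_lock);
		atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
		mcdi->mode = MCDI_MODE_POLL;	/* polled until events are enabled */
	}

Starting in MCDI_MODE_POLL matters: event queues do not exist yet at probe time, so early commands must poll shared memory for their responses.
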
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
82 seqno = mcdi->seqno & SEQ_MASK;
84 if (mcdi->mode == MCDI_MODE_EVENTS)
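
Source lines 72-84 come from the request copy-in path: the doorbell header carries the sequence number masked to SEQ_MASK, and in event mode an extra flag asks the MC to raise a completion event. A sketch of the header construction, assuming the EFX_POPULATE_DWORD_6 bitfield macro and the efx_dword_t type used across the sfc driver; only MCDI_HEADER_DATALEN and the seqno masking appear in the fragments, so the remaining field names are assumptions:

	static void efx_mcdi_build_header(struct efx_mcdi_iface *mcdi,
					  unsigned int cmd, size_t inlen,
					  efx_dword_t *hdr)
	{
		unsigned int seqno = mcdi->seqno & SEQ_MASK;
		unsigned int xflags = 0;

		/* In event mode, request a completion event from the MC */
		if (mcdi->mode == MCDI_MODE_EVENTS)
			xflags |= MCDI_HEADER_XFLAGS_EVREQ;	/* flag name assumed */

		EFX_POPULATE_DWORD_6(*hdr,
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
	}

The BUG_ON at line 79 enforces the ownership protocol: a caller must have taken the interface out of MCDI_STATE_QUIESCENT (via efx_mcdi_acquire, shown further below) before writing to shared memory.
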
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
135 * because generally mcdi responses are fast. After that, back off
165 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
170 if (error && mcdi->resplen == 0) {
173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
176 respseq, mcdi->seqno);
202 mcdi->resprc = rc;
204 mcdi->resplen = 0;
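
Source lines 109-204 are the polled-completion path. The comment fragment at line 135 states the strategy: poll very quickly at first, because MCDI responses are usually fast, then back off to roughly one poll per jiffy. Once a response header appears, the driver cross-checks the sequence number (lines 173-176) and extracts the length and error code (165, 202-204). A sketch of the backoff loop; mcdi_response_ready() is a hypothetical stand-in for the real header read, and TICK_USEC/get_seconds() follow common 2.6.36-era idiom rather than being quoted from this file:

	#include <linux/delay.h>
	#include <linux/sched.h>
	#include <linux/jiffies.h>

	static int efx_mcdi_poll_sketch(struct efx_nic *efx)
	{
		unsigned int spins = TICK_USEC;	/* ~1 jiffy worth of 1us spins */
		unsigned long finish = get_seconds() + MCDI_RPC_TIMEOUT;

		for (;;) {
			if (spins) {
				--spins;
				udelay(1);	/* fast phase: responses are usually quick */
			} else {
				schedule_timeout_uninterruptible(1);	/* ~1 poll/jiffy */
			}

			if (mcdi_response_ready(efx))	/* hypothetical header check */
				break;

			if (get_seconds() >= finish)
				return -ETIMEDOUT;
		}
		return 0;
	}

The sequence check at line 173, (respseq ^ mcdi->seqno) & SEQ_MASK, treats any difference inside the mask as a mismatched response, which catches a late reply to an earlier, abandoned request.
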
235 static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
239 wait_event(mcdi->wq,
240 atomic_cmpxchg(&mcdi->state,
248 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
251 mcdi->wq,
252 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
264 if (mcdi->mode == MCDI_MODE_POLL)
270 static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
278 if (atomic_cmpxchg(&mcdi->state,
281 wake_up(&mcdi->wq);
288 static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
290 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
291 wake_up(&mcdi->wq);
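
Source lines 235-291 implement the ownership state machine that serialises access to the single shared-memory doorbell. Reconstructed from the fragments above; the lines elided between them are filled in as assumptions:

	static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
	{
		/* Sleep until this caller wins the QUIESCENT -> RUNNING
		 * transition; atomic_cmpxchg makes the hand-off race-free. */
		wait_event(mcdi->wq,
			   atomic_cmpxchg(&mcdi->state,
					  MCDI_STATE_QUIESCENT,
					  MCDI_STATE_RUNNING) ==
			   MCDI_STATE_QUIESCENT);
	}

	static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
	{
		/* Only the first completer moves RUNNING -> COMPLETED; a
		 * second call (e.g. a late event) returns false and does
		 * nothing. */
		if (atomic_cmpxchg(&mcdi->state,
				   MCDI_STATE_RUNNING,
				   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
			wake_up(&mcdi->wq);
			return true;
		}
		return false;
	}

	static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
	{
		/* Owner is done with the response; free the interface and
		 * wake the next efx_mcdi_acquire() waiter. */
		atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
		wake_up(&mcdi->wq);
	}

Line 264 is also worth noting: after the wait at lines 251-252 returns, the waiter re-checks mcdi->mode, so a concurrent switch to MCDI_MODE_POLL (see lines 395-414 below) makes it fall back to polling instead of sleeping for an event that will never arrive.
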
297 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
300 spin_lock(&mcdi->iface_lock);
302 if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
303 if (mcdi->credits)
305 --mcdi->credits;
309 "seq 0x%x\n", seqno, mcdi->seqno);
311 mcdi->resprc = errno;
312 mcdi->resplen = datalen;
317 spin_unlock(&mcdi->iface_lock);
320 efx_mcdi_complete(mcdi);
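
Source lines 297-320 handle the completion event. The credits counter is the partner of the timeout path in the RPC wrapper (lines 353-354 below): each timed-out request leaves one credit behind, so one subsequent out-of-sequence completion event is silently consumed instead of being logged as a mismatch. A sketch with the branches around the quoted fragments filled in as assumptions:

	static void efx_mcdi_ev_cpl_sketch(struct efx_nic *efx, unsigned int seqno,
					   unsigned int datalen, unsigned int errno)
	{
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
		bool wake = false;

		spin_lock(&mcdi->iface_lock);

		if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
			if (mcdi->credits)
				--mcdi->credits;	/* stale event from an abandoned request */
			else
				/* genuine mismatch (line 309); message text assumed */
				EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
					"seq 0x%x\n", seqno, mcdi->seqno);
		} else {
			mcdi->resprc = errno;		/* publish result under iface_lock */
			mcdi->resplen = datalen;
			wake = true;
		}

		spin_unlock(&mcdi->iface_lock);

		if (wake)
			efx_mcdi_complete(mcdi);	/* line 320 */
	}
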
329 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
333 efx_mcdi_acquire(mcdi);
336 spin_lock_bh(&mcdi->iface_lock);
337 ++mcdi->seqno;
338 spin_unlock_bh(&mcdi->iface_lock);
342 if (mcdi->mode == MCDI_MODE_POLL)
352 spin_lock_bh(&mcdi->iface_lock);
353 ++mcdi->seqno;
354 ++mcdi->credits;
355 spin_unlock_bh(&mcdi->iface_lock);
359 cmd, (int)inlen, mcdi->mode);
367 spin_lock_bh(&mcdi->iface_lock);
368 rc = -mcdi->resprc;
369 resplen = mcdi->resplen;
370 spin_unlock_bh(&mcdi->iface_lock);
374 min(outlen, mcdi->resplen + 3) & ~0x3);
389 efx_mcdi_release(mcdi);
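
Source lines 329-389 are the core RPC path: take ownership, bump the sequence number under iface_lock, issue the request, then either poll or sleep on an event. On timeout the driver bumps both seqno and credits so a late completion for the abandoned request is recognised and discarded by efx_mcdi_ev_cpl(). A condensed sketch of that control flow; the copyin/copyout/poll/await helper names are inferred, not quoted, and the timeout message text is assumed around the argument list visible at line 359:

	int efx_mcdi_rpc_sketch(struct efx_nic *efx, unsigned int cmd,
				const u8 *inbuf, size_t inlen,
				u8 *outbuf, size_t outlen, size_t *outlen_actual)
	{
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
		size_t resplen = 0;
		int rc;

		efx_mcdi_acquire(mcdi);			/* serialise callers */

		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;				/* new sequence number */
		spin_unlock_bh(&mcdi->iface_lock);

		efx_mcdi_copyin(efx, cmd, inbuf, inlen);	/* header + payload */

		if (mcdi->mode == MCDI_MODE_POLL)
			rc = efx_mcdi_poll(efx);	/* spin/backoff on shared memory */
		else
			rc = efx_mcdi_await_completion(efx);	/* sleep for the event */

		if (rc != 0) {
			/* Timed out: abandon the request. Bumping seqno and
			 * credits lets the event path discard the late reply. */
			spin_lock_bh(&mcdi->iface_lock);
			++mcdi->seqno;
			++mcdi->credits;
			spin_unlock_bh(&mcdi->iface_lock);
			EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
				cmd, (int)inlen, mcdi->mode);
		} else {
			spin_lock_bh(&mcdi->iface_lock);
			rc = -mcdi->resprc;		/* MC errno -> negative kernel errno */
			resplen = mcdi->resplen;
			spin_unlock_bh(&mcdi->iface_lock);

			if (rc == 0 && outbuf)
				efx_mcdi_copyout(efx, outbuf,
						 min(outlen, resplen + 3) & ~0x3);
			if (outlen_actual)
				*outlen_actual = resplen;
		}

		efx_mcdi_release(mcdi);
		return rc;
	}

Two details from the fragments: rc = -mcdi->resprc (line 368) negates the MC's positive error code into a kernel-style errno, and min(outlen, resplen + 3) & ~0x3 (line 374) copies whole 32-bit dwords (resplen rounded up) without exceeding the caller's buffer.
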
395 struct efx_mcdi_iface *mcdi;
400 mcdi = efx_mcdi(efx);
401 if (mcdi->mode == MCDI_MODE_POLL)
405 * mcdi requests are always completed in shared memory. We do this by
412 mcdi->mode = MCDI_MODE_POLL;
414 efx_mcdi_complete(mcdi);
419 struct efx_mcdi_iface *mcdi;
424 mcdi = efx_mcdi(efx);
426 if (mcdi->mode == MCDI_MODE_EVENTS)
436 efx_mcdi_acquire(mcdi);
437 mcdi->mode = MCDI_MODE_EVENTS;
438 efx_mcdi_release(mcdi);
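
Source lines 395-438 switch between polled and event-driven completion. The comment at line 405 gives the key invariant: because MCDI responses are always written to shared memory, the driver can flip to MCDI_MODE_POLL and then "complete" the outstanding request, which wakes the sleeping waiter; on wake-up it sees the new mode (line 264) and polls for the result instead. A sketch of both transitions, reconstructed around the quoted fragments:

	void efx_mcdi_mode_poll_sketch(struct efx_nic *efx)
	{
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		if (mcdi->mode == MCDI_MODE_POLL)
			return;

		/* Safe even with a request in flight: the response will
		 * still appear in shared memory, so flip the mode first,
		 * then kick any waiter over to the polling path. */
		mcdi->mode = MCDI_MODE_POLL;
		efx_mcdi_complete(mcdi);
	}

	void efx_mcdi_mode_event_sketch(struct efx_nic *efx)
	{
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		if (mcdi->mode == MCDI_MODE_EVENTS)
			return;

		/* Acquiring the interface guarantees no request is
		 * outstanding while the completion mode changes. */
		efx_mcdi_acquire(mcdi);
		mcdi->mode = MCDI_MODE_EVENTS;
		efx_mcdi_release(mcdi);
	}

The asymmetry is deliberate: switching to POLL must work even while a request is outstanding, whereas switching to EVENTS can simply wait for the interface to go quiescent.
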
443 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
446 * either by a BADASSERT or REBOOT event. If the mcdi interface is
448 * set the header correctly. However, if the mcdi interface is waiting
461 spin_lock(&mcdi->iface_lock);
462 if (efx_mcdi_complete(mcdi)) {
463 if (mcdi->mode == MCDI_MODE_EVENTS) {
464 mcdi->resprc = rc;
465 mcdi->resplen = 0;
471 spin_unlock(&mcdi->iface_lock);
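
Source lines 443-471 handle the firmware terminating an in-flight command with a BADASSERT or REBOOT event. If efx_mcdi_complete() returns true there was a waiter to fail, and in event mode a credit is taken for the completion event that will never match. The fragments do not show the else branch or the function name, so both are assumptions in this sketch:

	static void efx_mcdi_ev_death_sketch(struct efx_nic *efx, int rc)
	{
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		spin_lock(&mcdi->iface_lock);
		if (efx_mcdi_complete(mcdi)) {
			/* A request was in flight: fail it with the event's
			 * error code. */
			if (mcdi->mode == MCDI_MODE_EVENTS) {
				mcdi->resprc = rc;
				mcdi->resplen = 0;
				++mcdi->credits;	/* absorb the orphaned completion */
			}
		} else {
			/* Assumed fallback: nobody was waiting, so schedule
			 * a controller reset to recover the MC. */
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		}
		spin_unlock(&mcdi->iface_lock);
	}
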