/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2020 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
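/* Map a struct member to its offset within the hardware MMR block;
   NEXT_DESC_PTR is the first register in the block.  */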
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]

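/* Has software enabled this channel (DMAEN set in CONFIG) ?  */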
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

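/* Is the channel actively transferring (DMA_RUN set in IRQ_STATUS) ?  */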
static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

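/* Look up (and cache) the peer device this channel talks to, as selected
   by the PERIPHERAL_MAP register.  */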
static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}

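/* Start a new work unit: decode the element size and flow mode from CONFIG,
   fetch the next descriptor from memory when the flow mode calls for one,
   and latch the CURR_* registers.  Descriptor elements are stored in memory
   in MMR order: next descriptor pointer (lower half only for small flow,
   both halves for large flow), start address low/high, config, x_count,
   x_modify, y_count, y_modify; array-flow descriptors omit the pointer.  */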
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
	      dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
	hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
	hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
	hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
	hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
	&dma->sal,
	&dma->sah,
	&dma->config,
	&dma->x_count,
	(void *) &dma->x_modify,
	&dma->y_count,
	(void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
	{
	case DMAFLOW_LARGE:
	  dma->ndph = _flows[1];
	  --ndsize;
	  ++flows;
	  /* Fall through to pick up the lower half too.  */
	case DMAFLOW_SMALL:
	  dma->ndpl = _flows[0];
	  --ndsize;
	  ++flows;
	  break;
	}

      for (idx = 0; idx < ndsize; ++idx)
	*stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}

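/* A row (X count) has been exhausted.  Raise the data interrupt if DI_EN is
   set, then either step to the next row of a 2D transfer or move on to the
   next work unit.  Returns non-zero while there is more work to do.  */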
static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);

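/* (Re)arm the event that pumps this channel; a delay of 0 simply cancels
   any pending event.  */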
static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
					  bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
	goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}

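/* Handle a 16- or 32-bit MMR write from the core.  Writing CONFIG with
   DMAEN set is what actually kicks off a transfer.  */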
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
			  address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ... */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
	{
	  if (nr_bytes == 4)
	    *value32p = value;
	  else
	    *value16p = value;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
	*value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
	{
	  *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
	  /* Clear peripheral peer so it gets looked up again.  */
	  dma->hw_peer = NULL;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
	*value32p = value;
      else
	*value16p = value;

      if (bfin_dma_enabled (dma))
	{
	  dma->irq_status |= DMA_RUN;
	  bfin_dma_process_desc (me, dma);
	  /* The writer is the master.  */
	  if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
	    bfin_dma_reschedule (me, 1);
	}
      else
	{
	  dma->irq_status &= ~DMA_RUN;
	  bfin_dma_reschedule (me, 0);
	}
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
	*value16p = value;
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}

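/* Handle a 16- or 32-bit MMR read from the core.  */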
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
			 address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

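/* DMA slave read: the peer pulls data from system memory through this
   channel starting at CURR_ADDR.  */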
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
			  unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

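/* DMA slave write: the peer pushes data through this channel into system
   memory starting at CURR_ADDR.  */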
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
			   int space, unsigned_word addr,
			   unsigned nr_bytes,
			   int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

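/* Parse the "reg" property, attach the channel's MMR block to the parent
   bus, and remember the base address for later offset calculations.  */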
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}

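/* Instantiate the device: allocate per-channel state, register the port,
   I/O, and DMA callbacks, map the MMRs, and set the reset value of
   PERIPHERAL_MAP.  */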
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};