/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
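
/* The fields from next_desc_ptr onward mirror the channel's MMR block in
   hardware register order, so the I/O handlers below can turn a register
   offset directly into a pointer into this struct.  mmr_offset() yields a
   field's offset relative to NEXT_DESC_PTR, and mmr_name() picks the trace
   name for a register from its 32-bit slot.  */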

static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}
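
/* Peer lookup goes through the parent DMA controller, which resolves
   PERIPHERAL_MAP to the device model on the other end of this channel (or
   to the matching MDMA channel).  The result is cached in hw_peer; the
   PERIPHERAL_MAP write handler below clears the cache so the next transfer
   re-resolves the peer.  */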

static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
	      dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
	hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
	hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
	hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
	hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
	&dma->sal,
	&dma->sah,
	&dma->config,
	&dma->x_count,
	(void *) &dma->x_modify,
	&dma->y_count,
	(void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
	{
	case DMAFLOW_LARGE:
	  dma->ndph = _flows[1];
	  --ndsize;
	  ++flows;
	  /* Fall through.  */
	case DMAFLOW_SMALL:
	  dma->ndpl = _flows[0];
	  --ndsize;
	  ++flows;
	  break;
	}

      for (idx = 0; idx < ndsize; ++idx)
	*stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
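
/* Descriptor handling above, in brief: NDSIZE in CONFIG gives the number of
   16-bit descriptor elements to fetch.  ARRAY mode fetches them from
   CURR_DESC_PTR, while SMALL and LARGE fetch from NEXT_DESC_PTR.  LARGE
   descriptors begin with both halves of the next-descriptor pointer and
   SMALL with just the low half; the remaining elements land in the MMRs in
   register order (START_ADDR low/high, CONFIG, X_COUNT, X_MODIFY, Y_COUNT,
   Y_MODIFY), which is what the stores[] table encodes.  A zero X/Y count is
   treated as the maximum (0xffff).  */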

static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
					  bfin_dma_hw_event_callback, dma);
}
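
/* A delay of 0 just cancels any pending event.  Callers pass a delay of 1
   when kicking off a transfer or when the previous chunk made progress, and
   a much larger delay (5000) when the chunk stalled (e.g. the peer had
   nothing to hand over yet), so a stalled channel polls occasionally
   instead of spinning every cycle.  */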

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled?  Abort for now.  */
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
	goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
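
/* Each callback pumps at most one chunk: up to sizeof (buf) bytes when
   x_modify walks forward, but only a single element when x_modify is
   negative (see the XXX above).  The dma_master/acked fields in the common
   dv_bfin header form a simple handshake with the peer; when the channel
   has not been acked and data is still outstanding, the event is merely
   rescheduled, so the rest of the simulator keeps running between chunks.  */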

static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
			  address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ... */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
	{
	  if (nr_bytes == 4)
	    *value32p = value;
	  else
	    *value16p = value;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
	*value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
	{
	  *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
	  /* Clear peripheral peer so it gets looked up again.  */
	  dma->hw_peer = NULL;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
	*value32p = value;
      else
	*value16p = value;

      if (bfin_dma_enabled (dma))
	{
	  dma->irq_status |= DMA_RUN;
	  bfin_dma_process_desc (me, dma);
	  /* The writer is the master.  */
	  if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
	    bfin_dma_reschedule (me, 1);
	}
      else
	{
	  dma->irq_status &= ~DMA_RUN;
	  bfin_dma_reschedule (me, 0);
	}
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
	*value16p = value;
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}
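
/* From the guest's point of view, a register-programmed (DMAFLOW_STOP)
   transfer therefore looks roughly like this (a sketch with made-up values,
   not a required ordering):

     START_ADDR = buffer address
     X_COUNT    = number of elements
     X_MODIFY   = element size in bytes (other strides are not modeled)
     CONFIG     = DMAEN | WDSIZE_32 | WNR | DI_EN

   The CONFIG write with DMAEN set is the trigger: it raises DMA_RUN, latches
   the working registers via bfin_dma_process_desc, and queues the first
   transfer event when this channel is the master (a peripheral channel, or
   the memory-write side of an MDMA pair).  */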

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
			 address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
			  unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial element transfers.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
			   int space, unsigned_word addr,
			   unsigned nr_bytes,
			   int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
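
/* The two hw_dma handlers above are the slave side of a transfer: when the
   peer is the master, it reads from or writes into this channel's CURR_ADDR
   window directly and the channel only advances its bookkeeping.  Neither
   handler applies x_modify striding yet (see the XXX notes), and a result
   shorter than one full element is treated as a stall.  */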

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
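
/* The "reg" property in the hardware tree supplies this channel's MMR block
   (base address and size).  The size must be exactly BFIN_MMR_DMA_SIZE, and
   the base is remembered in dma->base so the I/O handlers above can turn an
   absolute MMR address back into an offset into struct bfin_dma.  */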

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};