/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003, 2007 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

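/* Initialize the given cache.  Any geometry field (sets, ways, line size,
   memory latency) left at zero by the caller is given a machine-specific
   default.  */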
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}

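/* Release the storage allocated by frv_cache_init.  */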
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one-way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
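      /* Each way corresponds to a quarter of the 32K region; the CWE field
         trims that many ways' worth from the top of the window that is
         treated as RAM.  */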
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into. Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
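
  /* For example, with the default geometry of 64-byte lines and 64 sets,
     the low 6 address bits are the offset within the line and the next
     6 bits select the set.  */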

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
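  /* tag->tag holds the line's address with the set and offset bits cleared,
     so shifting the set number back into position and OR-ing it in
     reconstructs the line's base address.  */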
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

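  /* LRU values run from 0 (least recently used) up to cache->ways (most
     recently used).  Demote every tag that was more recent than TAG, then
     make TAG the most recent.  */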
  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* All ways were locked; no line was available.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   line if requested.
   Returns the number of cycles required to load the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least one line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
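
/* In this mode, requests are queued per pipeline and advance one stage per
   cycle; a request is executed when it reaches the last stage.  A request
   that must wait for memory is held in one of its pipeline's WAR registers
   while the memory latency elapses; if no WAR is free it waits in the
   shared BARS register, then in NARS, and failing that it is requeued.  */
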
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

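/* Requests are pooled: blocks of FREE_CHAIN_SIZE are allocated at once,
   chained onto the free lists above and recycled rather than returned to
   the heap.  Store requests are recycled on their own chain so that they
   keep their data buffers.  */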
static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request whose buffer is the
   required length.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the WAR request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

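/* Advance each pipeline one cycle: free the requests that have completed
   the last stage, shift the remaining requests along one stage, and start
   the next queued request in the first stage.  */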
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least one line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats
         before and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
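
/* Example (a sketch, not part of the original source): issuing a load
   through the cycle-accurate interface and running the cache until the
   data arrives.  REQNO and ADDRESS are assumed to come from the
   surrounding simulator state.

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);
*/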
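
/* Read a word from the cache without disturbing its state: hit/miss
   statistics are preserved and no line is fetched or promoted.  Returns 0
   if the data is not available in the cache.  */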
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Data not in the cache.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}