/*  This file is part of the program psim.

    Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

    */


#ifndef _VM_C_
#define _VM_C_

#if 0
#include "basics.h"
#include "registers.h"
#include "device.h"
#include "corefile.h"
#include "vm.h"
#include "interrupts.h"
#include "mon.h"
#endif

#include "cpu.h"

/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   execution of data space are prevented.

   For the OEA model things are more complex; here the separation of
   the instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */
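
/* Translation pipeline (rough sketch of the code below, not
   additional behaviour):

     1. if relocation is disabled (MSR[IR]/MSR[DR] clear) the effective
        address is used directly as the real address;
     2. otherwise the BAT array is searched for a matching block;
     3. failing that, the effective address is converted to a virtual
        address via the segment registers (32-bit) or segment table
        (64-bit), cached in the segment TLB;
     4. the virtual address is then converted to a real address via the
        page TLB or, on a miss, a search of the hashed page table,
        updating the referenced (and, on writes, changed) bits as it
        goes.

   See om_translate_effective_to_real() for the full sequence. */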


/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Values are stored in unpacked form, pre-aligned to where they will
   be needed when forming PTE addresses. */


/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1  or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};
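
/* The table is indexed as om_valid_access[key][PP][access].  For
   example, with key == 1 and page protection bits PP == 0b01, a data
   read (om_data_read) is permitted while a data write (om_data_write)
   is not: om_valid_access[1][1][om_data_write] == 0.  This simply
   restates the table above. */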


/* BAT translation:

   The BAT data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;


/* Segment TLB:

   In this model the 32-bit and 64-bit segment tables are treated in
   very similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
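
/* Note: om_segment_tlb_index() below selects an entry using effective
   address bits 32..35 - for a 32-bit target these are simply the top
   four bits of the effective address, so the 16 entries shadow the 16
   segment registers directly (see om_unpack_sr()). */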


/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int protection;
  int changed;
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
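
/* The page TLB is direct mapped on effective address bits 46..51 (the
   low-order bits of the page index), giving the 64 entries above.  A
   lookup in om_virtual_to_real() just compares the cached VSID and
   page fields of the selected entry. */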


/* Memory translation:

   OEA memory translation potentially involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];

} om_map;


/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/* VM:

   Underlying memory object.  For the VEA this is just the
   core_map.  For the OEA it is the instruction and data memory
   translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};


/* OEA Support procedures */


STATIC_INLINE_VM\
(unsigned_word)
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
				  om_segment_tlb_index_start_bit,
				  om_segment_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM\
(unsigned_word)
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
				  om_page_tlb_index_start_bit,
				  om_page_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM\
(unsigned_word)
om_hash_page(unsigned_word masked_vsid,
	     unsigned_word ea)
{
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
  TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
		   (unsigned long)ea,
		   (unsigned long)masked_vsid,
		   (unsigned long)masked_ea,
		   (unsigned long)hash));
  return hash;
}
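
/* Rough sketch of the hash: the VSID (already aligned to the PTE
   group size by the unpack routines) is XORed with the page index
   extracted from the effective address, also shifted left by the PTE
   group size (6 bits for the 32-bit format's 64-byte groups).  The
   second iteration of the search in om_virtual_to_real() complements
   the hash to locate the secondary PTE group. */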

STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);
#endif
}

STATIC_INLINE_VM\
(int)
om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_page(unsigned_word ea)
{
  return MASKED(ea, 36, 51);
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_byte(unsigned_word ea)
{
  return MASKED(ea, 52, 63);
}

/* return the VSID aligned for pte group addr */
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_masked_vsid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_pp(unsigned_word pte_1)
{
  return MASKED(pte_1, 62, 63); /*PP*/
}

STATIC_INLINE_VM\
(int)
om_pte_1_referenced(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 55, 55);
}

STATIC_INLINE_VM\
(int)
om_pte_1_changed(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 56, 56);
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_masked_rpn(unsigned_word pte_1)
{
  return MASKED(pte_1, 0, 51); /*RPN*/
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_api(unsigned_word ea)
{
  return EXTRACTED(ea, 36, 41);
}
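
/* Summary of the PTE fields as used by the accessors above (64-bit
   style bit numbering): word 0 holds the valid bit, the VSID, the
   hash-function select bit H and the abbreviated page index (API);
   word 1 holds the real page number (RPN), the referenced bit R (55),
   the changed bit C (56) and the page protection bits PP (62..63). */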


/* Page and segment table read/write operators; these still need to
   account for the PPC's XOR endian addressing */

STATIC_INLINE_VM\
(unsigned_word)
om_read_word(om_map *map,
	     unsigned_word ra,
	     cpu *processor,
	     unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);
}

STATIC_INLINE_VM\
(void)
om_write_word(om_map *map,
	      unsigned_word ra,
	      unsigned_word val,
	      cpu *processor,
	      unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
}


/* Bring things into existence */

INLINE_VM\
(vm *)
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
		- om_segment_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
		- om_page_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
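
/* Illustrative usage sketch (not compiled - the `physical' pointer,
   buffer and addresses are placeholders for the example).  A client
   creates the vm object from an existing core, obtains a data map,
   and reads through it: */
#if 0
{
  vm *virtual = vm_create(physical);
  vm_data_map *data_map = vm_create_data_map(virtual);
  unsigned_1 buf[4];
  /* with a NULL processor, translation failures do not abort; the
     returned count is just short */
  int nr_moved = vm_data_map_read_buffer(data_map, buf, 0x1000, sizeof(buf),
					 NULL /*processor*/, 0 /*cia*/);
}
#endif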


STATIC_INLINE_VM\
(om_bat *)
om_effective_to_bat(om_map *map,
		    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
	!= bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}


STATIC_INLINE_VM\
(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,
			unsigned_word ea,
			cpu *processor,
			unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
					     + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
		   (unsigned long)ea,
		   (long)om_segment_tlb_index(ea),
		   (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
		   (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id, 31-6-24+1, 31-6),
		   (unsigned long)EXTRACTED32(ea, 4, 31)));
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
	(map->real_address_of_segment_table
	 | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
	   segment_table_entry < (segment_table_entry_group
				  + sizeof_segment_table_entry_group);
	   segment_table_entry += sizeof_segment_table_entry) {
	/* byte order? */
	unsigned_word segment_table_entry_dword_0 =
	  om_read_word(map, segment_table_entry, processor, cia);
	unsigned_word segment_table_entry_dword_1 =
	  om_read_word(map, segment_table_entry + 8,
		       processor, cia);
	int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
	unsigned_word masked_effective_segment_id =
	  MASKED64(segment_table_entry_dword_0, 0, 35);
	if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
	  /* don't permit some things */
	  if (MASKED64(segment_table_entry_dword_0, 57, 57))
	    error("om_effective_to_virtual() - T=1 in STE not supported\n");
	  /* update segment tlb */
	  segment_tlb_entry->is_valid = is_valid;
	  segment_tlb_entry->masked_effective_segment_id =
	    masked_effective_segment_id;
	  segment_tlb_entry->key[om_supervisor_state] =
	    EXTRACTED64(segment_table_entry_dword_0, 58, 58);
	  segment_tlb_entry->key[om_problem_state] =
	    EXTRACTED64(segment_table_entry_dword_0, 59, 59);
	  segment_tlb_entry->invalid_access =
	    (MASKED64(segment_table_entry_dword_0, 60, 60)
	     ? om_instruction_read
	     : om_access_any);
	  segment_tlb_entry->masked_virtual_segment_id =
	    INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
		       18-13, 63-7); /* aligned ready for pte group addr */
	  return segment_tlb_entry;
	}
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}



STATIC_INLINE_VM\
(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
		   unsigned_word ea,
		   om_segment_tlb_entry *segment_tlb_entry,
		   om_access_types access,
		   cpu *processor,
		   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
				       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
	  == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
	       (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash =
      om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
	(map->real_address_of_page_table
	 | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte_0;
      TRACE(trace_vm,
	    ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
	     (long)ea, current_hash,
	     map->real_address_of_page_table,
	     page_hash,
	     map->page_table_hash_mask,
	     (long)real_address_of_pte_group));
      for (real_address_of_pte_0 = real_address_of_pte_group;
	   real_address_of_pte_0 < (real_address_of_pte_group
				    + sizeof_pte_group);
	   real_address_of_pte_0 += sizeof_pte) {
	unsigned_word pte_0 = om_read_word(map,
					   real_address_of_pte_0,
					   processor, cia);
	/* did we hit? */
	if (om_pte_0_valid(pte_0)
	    && (current_hash == om_pte_0_hash(pte_0))
	    && (segment_tlb_entry->masked_virtual_segment_id
		== om_pte_0_masked_vsid(pte_0))
	    && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
	  unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
						 + sizeof_pte / 2);
	  unsigned_word pte_1 = om_read_word(map,
					     real_address_of_pte_1,
					     processor, cia);
	  page_tlb_entry->protection = om_pte_1_pp(pte_1);
	  page_tlb_entry->changed = om_pte_1_changed(pte_1);
	  page_tlb_entry->masked_virtual_segment_id = segment_tlb_entry->masked_virtual_segment_id;
	  page_tlb_entry->masked_page = om_ea_masked_page(ea);
	  page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
	  page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
	  if (!om_pte_1_referenced(pte_1)) {
	    om_write_word(map,
			  real_address_of_pte_1,
			  pte_1 | BIT(55),
			  processor, cia);
	    TRACE(trace_vm,
		  ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
		   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
	  }
	  else {
	    TRACE(trace_vm,
		  ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
		   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
	  }
	  return page_tlb_entry;
	}
      }
      page_hash = ~page_hash; /*???*/
    }
  }
  return NULL;
}


STATIC_INLINE_VM\
(void)
om_interrupt(cpu *processor,
	     unsigned_word cia,
	     unsigned_word ea,
	     om_access_types access,
	     storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("internal error - om_interrupt - unexpected access type %d", access);
  }
}


STATIC_INLINE_VM\
(unsigned_word)
om_translate_effective_to_real(om_map *map,
			       unsigned_word ea,
			       om_access_types access,
			       cpu *processor,
			       unsigned_word cia,
			       int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
		     (long)ea, (long)ra));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      if (abort)
	om_interrupt(processor, cia, ea, access,
		     protection_violation_storage_interrupt);
      else
	return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
		     (long)ea, (long)ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
				      access,
				      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
	[segment_tlb_entry->key[map->is_problem_state]]
	[page_tlb_entry->protection]
	[access])) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
				       page_tlb_entry->real_address_of_pte_1,
				       processor, cia);
    om_write_word(map,
		  page_tlb_entry->real_address_of_pte_1,
		  pte_1 | BIT(56),
		  processor, cia);
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
		     (long)ea, (long)page_tlb_entry,
		     (long)page_tlb_entry->real_address_of_pte_1));
  }

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
		   (long)ea, (long)ra));
  return ra;
}


/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
STATIC_INLINE_VM\
(void)
om_unpack_bat(om_bat *bat,
	      spreg ubat,
	      spreg lbat)
{
  /* for extracting out the offset within a block */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
			    | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}
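
/* Roughly: the low 17 bits of the effective address are always
   within-block offset (the architectural minimum BAT block size of
   128KB); each BL bit set in the upper BAT register doubles the
   block, up to 256MB.  block_effective_page_index_mask is simply the
   complement, used by om_effective_to_bat() to compare the remaining
   high-order bits. */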


/* rebuild the given bat table */
STATIC_INLINE_VM\
(void)
om_unpack_bats(om_bats *bats,
	       spreg *raw_bats,
	       msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
	? EXTRACTED(ubat, 63, 63)
	: EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
		    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_sr(vm *virtual,
	     sreg *srs,
	     int which_sr,
	     cpu *processor,
	     unsigned_word cia)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",
	      which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
				       ? om_instruction_read
				       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
	       31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_srs(vm *virtual,
	      sreg *srs,
	      cpu *processor,
	      unsigned_word cia)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
		 processor, cia);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM\
(void)
vm_synchronize_context(vm *virtual,
		       spreg *sprs,
		       sreg *srs,
		       msreg msr,
		       /**/
		       cpu *processor,
		       unsigned_word cia)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
				63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
				     7, 7+9-1)
			  | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;
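
  /* Note: om_hash_page() produces a hash already shifted left by the
     size of a PTE group, and page_table_hash_mask above is built with
     the same alignment, so (hash & mask) ORed onto
     real_address_of_page_table in om_virtual_to_real() directly
     yields a PTE group address.  The HTABMASK/HTABSIZE field of SDR1
     selects how many of the high-order hash bits participate. */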


  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,
		processor, cia);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
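    /* With the usual configuration of WITH_XOR_ENDIAN == 8, the loop
       below fills in xor[0]=7, xor[1]=6, xor[3]=4 and xor[7]=0 when
       the MSR endian mode differs from the simulated default, so a
       transfer of 2**n bytes XORs its address with (8 - 2**n) - the
       PowerPC little-endian address munge.  When the modes agree the
       table entries are set to zero. */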
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
	|| (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
	|| (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      cpu_error(processor, cia, "attempt to change hardwired byte order");
  }
}

/* update vm data structures due to a TLB operation */

INLINE_VM\
(void)
vm_page_tlb_invalidate_entry(vm *memory,
			     unsigned_word ea)
{
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
}

INLINE_VM\
(void)
vm_page_tlb_invalidate_all(vm *memory)
{
  int i;
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  }
  TRACE(trace_vm, ("tlb invalidate all\n"));
}



INLINE_VM\
(vm_data_map *)
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM\
(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}


STATIC_INLINE_VM\
(unsigned_word)
vm_translate(om_map *map,
	     unsigned_word ea,
	     om_access_types access,
	     cpu *processor,
	     unsigned_word cia,
	     int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
					  processor, cia,
					  abort);
  default:
    error("internal error - vm_translate - bad switch");
    return 0;
  }
}


INLINE_VM\
(unsigned_word)
vm_real_data_addr(vm_data_map *map,
		  unsigned_word ea,
		  int is_read,
		  cpu *processor,
		  unsigned_word cia)
{
  return vm_translate(&map->translation,
		      ea,
		      is_read ? om_data_read : om_data_write,
		      processor,
		      cia,
		      1); /*abort*/
}


INLINE_VM\
(unsigned_word)
vm_real_instruction_addr(vm_instruction_map *map,
			 cpu *processor,
			 unsigned_word cia)
{
  return vm_translate(&map->translation,
		      cia,
		      om_instruction_read,
		      processor,
		      cia,
		      1); /*abort*/
}

INLINE_VM\
(instruction_word)
vm_instruction_map_read(vm_instruction_map *map,
			cpu *processor,
			unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}


INLINE_VM\
(int)
vm_data_map_read_buffer(vm_data_map *map,
			void *target,
			unsigned_word addr,
			unsigned nr_bytes,
			cpu *processor,
			unsigned_word cia)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
				    ea, om_data_read,
				    processor, /*processor*/
				    cia, /*cia*/
				    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
	!= sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_VM\
(int)
vm_data_map_write_buffer(vm_data_map *map,
			 const void *source,
			 unsigned_word addr,
			 unsigned nr_bytes,
			 int violate_read_only_section,
			 cpu *processor,
			 unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
				    ea, om_data_write,
				    processor,
				    cia,
				    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
			       ? map->read
			       : map->write),
			      &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}


/* define the read/write 1/2/4/8/word functions */

#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N



#endif /* _VM_C_ */