1/* smp.h: Sparc64 specific SMP stuff.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef _SPARC64_SMP_H
7#define _SPARC64_SMP_H
8
9#include <linux/config.h>
10#include <linux/threads.h>
11#include <linux/cache.h>
12#include <asm/asi.h>
13#include <asm/starfire.h>
14#include <asm/spitfire.h>
15
16#ifndef __ASSEMBLY__
17/* PROM provided per-processor information we need
18 * to start them all up.
19 */
20
/* Per-cpu information obtained from the PROM at probe time,
 * needed later to start the secondary processors.
 */
struct prom_cpuinfo {
	int prom_node;	/* OBP device-tree node handle for this cpu */
	int mid;	/* presumably the hardware module ID (MID) reported
			 * by the PROM — TODO confirm against the prober */
};
25
26extern int linux_num_cpus;	/* number of CPUs probed  */
27extern struct prom_cpuinfo linux_cpus[64];
28
29#endif /* !(__ASSEMBLY__) */
30
31#ifdef CONFIG_SMP
32
33#ifndef __ASSEMBLY__
34
35/* Per processor Sparc parameters we need. */
36
37/* Keep this a multiple of 64-bytes for cache reasons. */
typedef struct {
	/* Dcache line 1 */
	unsigned int	__pad0;		/* bh_count moved to irq_stat for consistency. KAO */
	unsigned int	multiplier;	/* NOTE(review): likely delay-loop calibration
					 * state together with 'counter' — confirm */
	unsigned int	counter;
	unsigned int	idle_volume;	/* non-zero while this cpu sits in the idle
					 * loop; tested by smp_send_reschedule() */
	unsigned long	clock_tick;	/* %tick's per second */
	unsigned long	udelay_val;	/* per-cpu loops-per-jiffy value for udelay() */

	/* Dcache line 2 */
	unsigned int	pgcache_size;	/* entry count for pte_cache[] below */
	unsigned int	pgdcache_size;	/* entry count for pgd_cache below */
	unsigned long	*pte_cache[2];	/* per-cpu page-table page quicklists */
	unsigned long	*pgd_cache;

	/* Dcache lines 3 and 4 */
	unsigned int	irq_worklists[16];
} ____cacheline_aligned cpuinfo_sparc;
56
57extern cpuinfo_sparc cpu_data[NR_CPUS];
58
59/*
60 *	Private routines/data
61 */
62
63extern unsigned char boot_cpu_id;
64extern unsigned long cpu_present_map;
65#define cpu_online_map cpu_present_map
66
67/*
68 *	General functions that each host system must provide.
69 */
70
71extern void smp_callin(void);
72extern void smp_boot_cpus(void);
73extern void smp_store_cpu_info(int id);
74
75extern __volatile__ int __cpu_number_map[NR_CPUS];
76extern __volatile__ int __cpu_logical_map[NR_CPUS];
77
78extern __inline__ int cpu_logical_map(int cpu)
79{
80	return __cpu_logical_map[cpu];
81}
82extern __inline__ int cpu_number_map(int cpu)
83{
84	return __cpu_number_map[cpu];
85}
86
/* Read this cpu's hardware id straight from the chip.  Cheetah-class
 * parts (tlb_type cheetah/cheetah_plus) expose it in the Safari
 * configuration register, Starfire machines need their own helper,
 * and everything else uses the UPA configuration register.
 */
extern __inline__ int hard_smp_processor_id(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long safari_config;
		/* ldxa with an "i" constraint so the ASI is encoded
		 * directly in the instruction. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (safari_config)
				     : "i" (ASI_SAFARI_CONFIG));
		/* cpu id field is bits 26:17 of the Safari config reg */
		return ((safari_config >> 17) & 0x3ff);
	} else if (this_is_starfire != 0) {
		return starfire_hard_smp_processor_id();
	} else {
		unsigned long upaconfig;
		__asm__ __volatile__("ldxa	[%%g0] %1, %0"
				     : "=r" (upaconfig)
				     : "i" (ASI_UPA_CONFIG));
		/* cpu id field is bits 21:17 of the UPA config reg */
		return ((upaconfig >> 17) & 0x1f);
	}
}
105
106#define smp_processor_id() (current->processor)
107
108/* This needn't do anything as we do not sleep the cpu
109 * inside of the idler task, so an interrupt is not needed
110 * to get a clean fast response.
111 *
112 * Addendum: We do want it to do something for the signal
113 *           delivery case, we detect that by just seeing
114 *           if we are trying to send this to an idler or not.
115 */
116extern __inline__ void smp_send_reschedule(int cpu)
117{
118	extern void smp_receive_signal(int);
119	if(cpu_data[cpu].idle_volume == 0)
120		smp_receive_signal(cpu);
121}
122
123/* This is a nop as well because we capture all other cpus
124 * anyways when making the PROM active.
125 */
126extern __inline__ void smp_send_stop(void) { }
127
128#endif /* !(__ASSEMBLY__) */
129
130#define PROC_CHANGE_PENALTY	20
131
#endif /* CONFIG_SMP */
133
134#define NO_PROC_ID		0xFF
135
136#endif /* !(_SPARC64_SMP_H) */
137