/*-
 * Copyright (c) 2009 Rui Paulo <rpaulo@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>

#include <machine/pmc_mdep.h>

/*
 * Support for the Intel XScale network processors.
 *
 * There are, to date, three generations of XScale cores.
 *
 * The first generation has two PMCs; event selection, interrupt
 * configuration and overflow flag setup are all done by writing to the
 * PMNC register.  It also supports fewer monitoring events than the
 * later generations.
 *
 * The second and third generations have four PMCs, with one register
 * for event selection, one for interrupt configuration and one for the
 * overflow flags.
 */
static int xscale_npmcs;
static int xscale_gen;	/* XScale core generation */

struct xscale_event_code_map {
	enum pmc_event	pe_ev;
	uint8_t		pe_code;
};

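/*
 * Map hwpmc(4) event identifiers to the XScale hardware event numbers
 * that get programmed into PMNC (generation 1) or EVTSEL (generations
 * 2 and 3).
 */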
const struct xscale_event_code_map xscale_event_codes[] = {
	/* 1st and 2nd Generation XScale cores */
	{ PMC_EV_XSCALE_IC_FETCH,		0x00 },
	{ PMC_EV_XSCALE_IC_MISS,		0x01 },
	{ PMC_EV_XSCALE_DATA_DEPENDENCY_STALLED,0x02 },
	{ PMC_EV_XSCALE_ITLB_MISS,		0x03 },
	{ PMC_EV_XSCALE_DTLB_MISS,		0x04 },
	{ PMC_EV_XSCALE_BRANCH_RETIRED,		0x05 },
	{ PMC_EV_XSCALE_BRANCH_MISPRED,		0x06 },
	{ PMC_EV_XSCALE_INSTR_RETIRED,		0x07 },
	{ PMC_EV_XSCALE_DC_FULL_CYCLE,		0x08 },
	{ PMC_EV_XSCALE_DC_FULL_CONTIG,		0x09 },
	{ PMC_EV_XSCALE_DC_ACCESS,		0x0a },
	{ PMC_EV_XSCALE_DC_MISS,		0x0b },
	{ PMC_EV_XSCALE_DC_WRITEBACK,		0x0c },
	{ PMC_EV_XSCALE_PC_CHANGE,		0x0d },
	/* 3rd Generation XScale cores */
	{ PMC_EV_XSCALE_BRANCH_RETIRED_ALL,	0x0e },
	{ PMC_EV_XSCALE_INSTR_CYCLE,		0x0f },
	{ PMC_EV_XSCALE_CP_STALL,		0x17 },
	{ PMC_EV_XSCALE_PC_CHANGE_ALL,		0x18 },
	{ PMC_EV_XSCALE_PIPELINE_FLUSH,		0x19 },
	{ PMC_EV_XSCALE_BACKEND_STALL,		0x1a },
	{ PMC_EV_XSCALE_MULTIPLIER_USE,		0x1b },
	{ PMC_EV_XSCALE_MULTIPLIER_STALLED,	0x1c },
	{ PMC_EV_XSCALE_DATA_CACHE_STALLED,	0x1e },
	{ PMC_EV_XSCALE_L2_CACHE_REQ,		0x20 },
	{ PMC_EV_XSCALE_L2_CACHE_MISS,		0x23 },
	{ PMC_EV_XSCALE_ADDRESS_BUS_TRANS,	0x40 },
	{ PMC_EV_XSCALE_SELF_ADDRESS_BUS_TRANS,	0x41 },
	{ PMC_EV_XSCALE_DATA_BUS_TRANS,		0x48 },
};

const int xscale_event_codes_size =
	sizeof(xscale_event_codes) / sizeof(xscale_event_codes[0]);

/*
 * Per-processor information.
 */
struct xscale_cpu {
	struct pmc_hw   *pc_xscalepmcs;
};

static struct xscale_cpu **xscale_pcpu;

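/*
 * The XScale performance monitoring registers are accessed through
 * coprocessor 14 (CP14) using MRC/MCR instructions, as done by the
 * inline accessors below.
 */
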
/*
 * Performance Monitor Control Register
 */
static __inline uint32_t
xscale_pmnc_read(void)
{
	uint32_t reg;

	__asm __volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (reg));

	return (reg);
}

static __inline void
xscale_pmnc_write(uint32_t reg)
{
	__asm __volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (reg));
}

/*
 * Clock Counter Register
 */
static __inline uint32_t
xscale_ccnt_read(void)
{
	uint32_t reg;

	__asm __volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (reg));

	return (reg);
}

static __inline void
xscale_ccnt_write(uint32_t reg)
{
	__asm __volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (reg));
}

/*
 * Interrupt Enable Register
 */
static __inline uint32_t
xscale_inten_read(void)
{
	uint32_t reg;

	__asm __volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (reg));

	return (reg);
}

static __inline void
xscale_inten_write(uint32_t reg)
{
	__asm __volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (reg));
}

/*
 * Overflow Flag Register
 */
static __inline uint32_t
xscale_flag_read(void)
{
	uint32_t reg;

	__asm __volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (reg));

	return (reg);
}

static __inline void
xscale_flag_write(uint32_t reg)
{
	__asm __volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (reg));
}

/*
 * Event Selection Register
 */
static __inline uint32_t
xscale_evtsel_read(void)
{
	uint32_t reg;

	__asm __volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (reg));

	return (reg);
}

static __inline void
xscale_evtsel_write(uint32_t reg)
{
	__asm __volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (reg));
}

/*
 * Performance Count Register N
 */
static uint32_t
xscale_pmcn_read(unsigned int pmc)
{
	uint32_t reg = 0;

	KASSERT(pmc < 4, ("[xscale,%d] illegal PMC number %d", __LINE__, pmc));

	switch (pmc) {
	case 0:
		__asm __volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (reg));
		break;
	case 1:
		__asm __volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (reg));
		break;
	case 2:
		__asm __volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (reg));
		break;
	case 3:
		__asm __volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (reg));
		break;
	}

	return (reg);
}

static uint32_t
xscale_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < 4, ("[xscale,%d] illegal PMC number %d", __LINE__, pmc));

	switch (pmc) {
	case 0:
		__asm __volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (reg));
		break;
	case 1:
		__asm __volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (reg));
		break;
	case 2:
		__asm __volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (reg));
		break;
	case 3:
		__asm __volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (reg));
		break;
	}

	return (reg);
}

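/*
 * Allocate a PMC: verify that the request is for the XScale class,
 * translate the requested pmc_event into the hardware event code from
 * xscale_event_codes[] and record it in the per-PMC MD state.
 */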
static int
xscale_allocate_pmc(int cpu, int ri, struct pmc *pm,
  const struct pmc_op_pmcallocate *a)
{
	enum pmc_event pe;
	uint32_t caps, config;
	int i;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] illegal row index %d", __LINE__, ri));

	caps = a->pm_caps;
	if (a->pm_class != PMC_CLASS_XSCALE)
		return (EINVAL);
	pe = a->pm_ev;
	for (i = 0; i < xscale_event_codes_size; i++) {
		if (xscale_event_codes[i].pe_ev == pe) {
			config = xscale_event_codes[i].pe_code;
			break;
		}
	}
	if (i == xscale_event_codes_size)
		return EINVAL;
	/*
	 * Generation 1 supports fewer events; reject anything past
	 * PMC_EV_XSCALE_PC_CHANGE, the last event common to all
	 * generations.  This assumes the event enumeration follows the
	 * same order as xscale_event_codes[].
	 */
	if (xscale_gen == 1 && pe > PMC_EV_XSCALE_PC_CHANGE)
		return EINVAL;
	pm->pm_md.pm_xscale.pm_xscale_evsel = config;

	PMCDBG(MDP,ALL,2,"xscale-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}

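/*
 * Read and write the performance counters.  For PMCs in sampling mode
 * the raw counter value is converted to and from a reload count with
 * the XSCALE_PERFCTR_VALUE_TO_RELOAD_COUNT() and
 * XSCALE_RELOAD_COUNT_TO_PERFCTR_VALUE() macros.
 */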
static int
xscale_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] illegal row index %d", __LINE__, ri));

	pm  = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;
	tmp = xscale_pmcn_read(ri);
	PMCDBG(MDP,REA,2,"xscale-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = XSCALE_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return 0;
}

static int
xscale_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] illegal row-index %d", __LINE__, ri));

	pm  = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = XSCALE_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG(MDP,WRI,1,"xscale-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	xscale_pmcn_write(ri, v);

	return 0;
}

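/*
 * Attach (pm != NULL) or detach (pm == NULL) a PMC from the given
 * hardware row.
 */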
static int
xscale_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] illegal row-index %d", __LINE__, ri));

	phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[xscale,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return 0;
}

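/*
 * Start a PMC: program the event selection for row 'ri' and set the
 * global enable bit.  On generation 1 cores the event numbers live in
 * fields of PMNC itself (shifted into the EVT0/EVT1 positions); on
 * generations 2 and 3 they are written to the EVTSEL register, one
 * byte per counter.
 */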
static int
xscale_start_pmc(int cpu, int ri)
{
	uint32_t pmnc, config, evtsel;
	struct pmc *pm;
	struct pmc_hw *phw;

	phw    = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
	pm     = phw->phw_pmc;
	config = pm->pm_md.pm_xscale.pm_xscale_evsel;

	/*
	 * Configure the event selection.
	 *
	 * On 1st generation XScale cores there is no EVTSEL register;
	 * the event selection fields are part of PMNC.
	 */
	if (xscale_npmcs == 2) {
		pmnc = xscale_pmnc_read();
		switch (ri) {
		case 0:
			pmnc &= ~XSCALE_PMNC_EVT0_MASK;
			pmnc |= (config << 12) & XSCALE_PMNC_EVT0_MASK;
			break;
		case 1:
			pmnc &= ~XSCALE_PMNC_EVT1_MASK;
			pmnc |= (config << 20) & XSCALE_PMNC_EVT1_MASK;
			break;
		default:
			/* XXX */
			break;
		}
		xscale_pmnc_write(pmnc);
	} else {
		evtsel = xscale_evtsel_read();
		switch (ri) {
		case 0:
			evtsel &= ~XSCALE_EVTSEL_EVT0_MASK;
			evtsel |= config & XSCALE_EVTSEL_EVT0_MASK;
			break;
		case 1:
			evtsel &= ~XSCALE_EVTSEL_EVT1_MASK;
			evtsel |= (config << 8) & XSCALE_EVTSEL_EVT1_MASK;
			break;
		case 2:
			evtsel &= ~XSCALE_EVTSEL_EVT2_MASK;
			evtsel |= (config << 16) & XSCALE_EVTSEL_EVT2_MASK;
			break;
		case 3:
			evtsel &= ~XSCALE_EVTSEL_EVT3_MASK;
			evtsel |= (config << 24) & XSCALE_EVTSEL_EVT3_MASK;
			break;
		default:
			/* XXX */
			break;
		}
		xscale_evtsel_write(evtsel);
	}
	/*
	 * Enable the PMC.
	 *
	 * Note that XScale provides only one bit to enable/disable _all_
	 * performance monitoring units.
	 */
	pmnc = xscale_pmnc_read();
	pmnc |= XSCALE_PMNC_ENABLE;
	xscale_pmnc_write(pmnc);

	return 0;
}

static int
xscale_stop_pmc(int cpu, int ri)
{
	uint32_t pmnc, evtsel;
	struct pmc *pm;
	struct pmc_hw *phw;

	phw    = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
	pm     = phw->phw_pmc;

	/*
	 * Disable the PMCs.
	 *
	 * Note that XScale provides only one bit to enable/disable _all_
	 * performance monitoring units.
	 */
	pmnc = xscale_pmnc_read();
	pmnc &= ~XSCALE_PMNC_ENABLE;
	xscale_pmnc_write(pmnc);
	/*
	 * An event select value of 0xff makes the corresponding counter
	 * go into power saving mode.
	 */
	if (xscale_npmcs == 2) {
		pmnc = xscale_pmnc_read();
		switch (ri) {
		case 0:
			pmnc |= XSCALE_PMNC_EVT0_MASK;
			break;
		case 1:
			pmnc |= XSCALE_PMNC_EVT1_MASK;
			break;
		default:
			/* XXX */
			break;
		}
		xscale_pmnc_write(pmnc);
	} else {
		evtsel = xscale_evtsel_read();
		switch (ri) {
		case 0:
			evtsel |= XSCALE_EVTSEL_EVT0_MASK;
			break;
		case 1:
			evtsel |= XSCALE_EVTSEL_EVT1_MASK;
			break;
		case 2:
			evtsel |= XSCALE_EVTSEL_EVT2_MASK;
			break;
		case 3:
			evtsel |= XSCALE_EVTSEL_EVT3_MASK;
			break;
		default:
			/* XXX */
			break;
		}
		xscale_evtsel_write(evtsel);
	}

	return 0;
}

static int
xscale_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] illegal row-index %d", __LINE__, ri));

	phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[xscale,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return 0;
}

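/*
 * PMC overflow interrupt handler.  Currently only a stub: it logs the
 * interrupt and does not yet reload sampling counters or post samples
 * to the MI hwpmc layer.
 */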
static int
xscale_intr(int cpu, struct trapframe *tf)
{
	printf("intr\n");
	return 0;
}

static int
xscale_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char xscale_name[PMC_NAME_MAX];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < xscale_npmcs,
	    ("[xscale,%d] row-index %d out of range", __LINE__, ri));

	phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
	snprintf(xscale_name, sizeof(xscale_name), "XSCALE-%d", ri);
	if ((error = copystr(xscale_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return error;
	pi->pm_class = PMC_CLASS_XSCALE;
	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc	       = NULL;
	}

	return (0);
}

static int
xscale_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;

	return 0;
}

/*
 * XXX don't know what we should do here.
 */
static int
xscale_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	return 0;
}

static int
xscale_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	return 0;
}

static int
xscale_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, i;
	struct pmc_cpu *pc;
	struct xscale_cpu *pac;
	struct pmc_hw  *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[xscale,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG(MDP,INI,1,"xscale-init cpu=%d", cpu);

	xscale_pcpu[cpu] = pac = malloc(sizeof(struct xscale_cpu), M_PMC,
	    M_WAITOK|M_ZERO);
	pac->pc_xscalepmcs = malloc(sizeof(struct pmc_hw) * xscale_npmcs,
	    M_PMC, M_WAITOK|M_ZERO);
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_XSCALE].pcd_ri;
	KASSERT(pc != NULL, ("[xscale,%d] NULL per-cpu pointer", __LINE__));

	for (i = 0, phw = pac->pc_xscalepmcs; i < xscale_npmcs; i++, phw++) {
		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc      = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;
	}

	/*
	 * Disable and put the PMUs into power save mode.
	 */
	if (xscale_npmcs == 2) {
		xscale_pmnc_write(XSCALE_PMNC_EVT1_MASK |
		    XSCALE_PMNC_EVT0_MASK);
	} else {
		xscale_evtsel_write(XSCALE_EVTSEL_EVT3_MASK |
		    XSCALE_EVTSEL_EVT2_MASK | XSCALE_EVTSEL_EVT1_MASK |
		    XSCALE_EVTSEL_EVT0_MASK);
	}

	return 0;
}

static int
xscale_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	return 0;
}

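/*
 * Machine dependent initialization: determine the XScale core
 * generation (and hence the number of PMCs) from the CP15 ID register,
 * allocate the per-CPU state array and register the XSCALE class with
 * the machine independent hwpmc code.
 */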
struct pmc_mdep *
pmc_xscale_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	struct pmc_classdep *pcd;
	uint32_t idreg;

	/* Get the core generation from CP15. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (idreg));
	xscale_gen = (idreg >> 13) & 0x3;
	switch (xscale_gen) {
	case 1:
		xscale_npmcs = 2;
		break;
	case 2:
	case 3:
		xscale_npmcs = 4;
		break;
	default:
		printf("%s: unknown XScale core generation\n", __func__);
		return (NULL);
	}
	PMCDBG(MDP,INI,1,"xscale-init npmcs=%d", xscale_npmcs);

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	xscale_pcpu = malloc(sizeof(struct xscale_cpu *) * pmc_cpu_max(),
	    M_PMC, M_WAITOK|M_ZERO);

	/* Just one class. */
	pmc_mdep = pmc_mdep_alloc(1);

	pmc_mdep->pmd_cputype = PMC_CPU_INTEL_XSCALE;

	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_XSCALE];
	pcd->pcd_caps  = XSCALE_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_XSCALE;
	pcd->pcd_num   = xscale_npmcs;
	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc   = xscale_allocate_pmc;
	pcd->pcd_config_pmc     = xscale_config_pmc;
	pcd->pcd_pcpu_fini      = xscale_pcpu_fini;
	pcd->pcd_pcpu_init      = xscale_pcpu_init;
	pcd->pcd_describe       = xscale_describe;
	pcd->pcd_get_config     = xscale_get_config;
	pcd->pcd_read_pmc       = xscale_read_pmc;
	pcd->pcd_release_pmc    = xscale_release_pmc;
	pcd->pcd_start_pmc      = xscale_start_pmc;
	pcd->pcd_stop_pmc       = xscale_stop_pmc;
	pcd->pcd_write_pmc      = xscale_write_pmc;

	pmc_mdep->pmd_intr       = xscale_intr;
	pmc_mdep->pmd_switch_in  = xscale_switch_in;
	pmc_mdep->pmd_switch_out = xscale_switch_out;

	pmc_mdep->pmd_npmc   += xscale_npmcs;

	return (pmc_mdep);
}

void
pmc_xscale_finalize(struct pmc_mdep *md)
{
}