/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cpr.h>
#include <sys/ddi.h>
#include "cprboot.h"


/*
 * check if any cpd_t pages clash with the statefile buffer and shuffle
 * buf pages to free space; since kpages are saved in ascending order,
 * any buf pages preceding the current statefile buffer offset can be
 * written because those pages have already been read and restored
 */
static void
shuffle_pages(cpd_t *descp)
{
	pfn_t low_src_ppn, dst_ppn, tail_ppn, new_ppn;
	size_t dst_off;

	/*
	 * set the lowest source buf ppn for the (precede) comparison
	 * below; the ORIG macro is used for the case where the src buf
	 * page had already been moved - and would confuse the compare
	 */
	low_src_ppn = SF_ORIG_PPN(sfile.buf_offset);

	tail_ppn = descp->cpd_pfn + descp->cpd_pages;
	for (dst_ppn = descp->cpd_pfn; dst_ppn < tail_ppn; dst_ppn++) {
		/*
		 * if the dst page is outside the range of statefile
		 * buffer phys pages, it's OK to write that page;
		 * buf pages may have been moved outside the range,
		 * but only to locations isolated from any dst page
		 */
		if (dst_ppn < sfile.low_ppn || dst_ppn > sfile.high_ppn) {
			SF_STAT_INC(outside);
			continue;
		}

		/*
		 * the dst page is inside the range of buf ppns;
		 * don't need to move the buf page if the dst page
		 * precedes the lowest src buf page
		 */
		if (dst_ppn < low_src_ppn) {
			SF_STAT_INC(precede);
			continue;
		}

		/*
		 * the dst page clashes with the statefile buffer;
		 * move the buf page to a free location and update
		 * the buffer map
		 */
		new_ppn = find_apage();
		phys_xcopy(PN_TO_ADDR(dst_ppn), PN_TO_ADDR(new_ppn),
		    MMU_PAGESIZE);
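		/*
		 * record the buf page's new location in the buffer map,
		 * indexed by its byte offset within the statefile buffer,
		 * so later mapin_buf_pages() calls find the relocated copy
		 */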
		dst_off = mmu_ptob(dst_ppn - sfile.low_ppn);
		SF_BUF_PPN(dst_off) = new_ppn;
		SF_STAT_INC(move);
	}
}


/*
 * map-in source statefile buffer pages (read-only) at CB_SRC_VIRT;
 * sets the starting source vaddr with the correct page offset
 */
static void
mapin_buf_pages(size_t datalen, caddr_t *srcp)
{
	int dtlb_index, pg_off;
	caddr_t vaddr, tail;
	size_t off, bytes;
	pfn_t src_ppn;

	dtlb_index = cb_dents - CB_MAX_KPAGES - 1;
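	/*
	 * buf page ttes fill downward from cb_dents - CB_MAX_KPAGES - 1,
	 * just below the dtlb slots mapin_dst_pages() uses for dst kpages
	 */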
	off = sfile.buf_offset;
	pg_off = off & MMU_PAGEOFFSET;
	bytes = PAGE_ROUNDUP(pg_off + datalen);
	vaddr = (caddr_t)CB_SRC_VIRT;
	*srcp = vaddr + pg_off;

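	/*
	 * example, assuming 8K pages (TTE8K mappings): with buf_offset
	 * 0x2100 and datalen 0x3000, pg_off is 0x100, bytes rounds up to
	 * 0x4000 (two pages), and *srcp points 0x100 past CB_SRC_VIRT;
	 * the loop below maps the two buf pages backing that span
	 */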
	for (tail = vaddr + bytes; vaddr < tail; vaddr += MMU_PAGESIZE) {
		src_ppn = SF_BUF_PPN(off);
		cb_mapin(vaddr, src_ppn, TTE8K, 0, dtlb_index);
		dtlb_index--;
		off += MMU_PAGESIZE;
	}
}


/*
 * map-in destination kernel pages (read/write) at CB_DST_VIRT
 */
static void
mapin_dst_pages(cpd_t *descp)
{
	int dtlb_index, pages;
	caddr_t vaddr;
	pfn_t dst_ppn;

	dtlb_index = cb_dents - 1;
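	/*
	 * dst kpage ttes fill downward from the topmost dtlb entry;
	 * they stay clear of the buf page ttes below them as long as
	 * cpd_pages <= CB_MAX_KPAGES
	 */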
	vaddr = (caddr_t)CB_DST_VIRT;
	dst_ppn = descp->cpd_pfn;
	for (pages = 0; pages < descp->cpd_pages; pages++) {
		cb_mapin(vaddr, dst_ppn, TTE8K, TTE_HWWR_INT, dtlb_index);
		dtlb_index--;
		vaddr += MMU_PAGESIZE;
		dst_ppn++;
	}
}


/*
 * run a checksum on un/compressed data when flag is set
 */
static int
kdata_cksum(void *data, cpd_t *descp, uint_t flag)
{
	uint_t sum, expect;
	size_t len;

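	/*
	 * CPD_CSUM covers the kdata as stored in the statefile
	 * (cpd_length bytes, possibly compressed); CPD_USUM covers
	 * the uncompressed kpage contents (cpd_pages full pages)
	 */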
	if ((descp->cpd_flag & flag) == 0)
		return (0);
	else if (flag == CPD_CSUM) {
		expect = descp->cpd_csum;
		len = descp->cpd_length;
	} else {
		expect = descp->cpd_usum;
		len = mmu_ptob(descp->cpd_pages);
	}
	sum = checksum32(data, len);
	if (sum != expect) {
		prom_printf("\n%scompressed data checksum error, "
		    "expect 0x%x, got 0x%x\n", (flag == CPD_USUM) ? "un" : "",
		    expect, sum);
		return (ERR);
	}

	return (0);
}


/*
 * primary kpage restoration routine
 */
static int
restore_page_group(cpd_t *descp)
{
	caddr_t dst, datap;
	size_t size, len;
	caddr_t src;
	int raw;

#if defined(lint)
	(void) compress(0, 0, 0);
#endif

	/*
	 * move any source buf pages that clash with dst kernel pages;
	 * create tlb entries for the orig/new source buf pages and
	 * the dst kpages
	 */
	shuffle_pages(descp);
	mapin_buf_pages(descp->cpd_length, &src);
	mapin_dst_pages(descp);

	/*
	 * for compressed pages, run a checksum at the src vaddr and
	 * decompress to the mapped-in dst kpages; for uncompressed pages,
	 * just copy directly; uncompressed checksums are used for either
	 * uncompressed src data or decompressed result data
	 */
	dst = (caddr_t)CB_DST_VIRT;
	if (descp->cpd_flag & CPD_COMPRESS) {
		if (kdata_cksum(src, descp, CPD_CSUM))
			return (ERR);
		size = mmu_ptob(descp->cpd_pages);
		len = decompress(src, dst, descp->cpd_length, size);
		if (len != size) {
			prom_printf("\nbad decompressed len %lu, size %lu\n",
			    len, size);
			return (ERR);
		}
		raw = 0;
		datap = dst;
	} else {
		raw = 1;
		datap = src;
	}
	if (kdata_cksum(datap, descp, CPD_USUM))
		return (ERR);
	if (raw)
		bcopy(src, dst, descp->cpd_length);

	/*
	 * advance past the kdata for this cpd_t
	 */
	SF_ADV(descp->cpd_length);

	return (0);
}


/*
 * mapin part of the statefile buffer, copy to the virt destination,
 * and advance the statefile buffer offset.  this is used primarily
 * to copy thousands of tiny cpd_t into aligned struct space.
 */
static void
get_phys_data(void *vdst, size_t size)
{
	caddr_t src;

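	/*
	 * mapin_buf_pages() rounds the mapping up to whole pages and
	 * returns src at the current buffer page offset, so the bcopy
	 * below also handles a struct that straddles a page boundary
	 */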
	mapin_buf_pages(size, &src);
	bcopy(src, vdst, size);
	SF_ADV(size);
}


/*
 * clear leftover locked dtlb entries
 */
static void
dtlb_cleanup(void)
{
	int dtlb_index;
	caddr_t vaddr;
	tte_t tte;

	CB_VENTRY(dtlb_cleanup);

	dtlb_index = cb_dents - CB_MAX_KPAGES - CB_MAX_BPAGES - 1;
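	/*
	 * start one slot below the buf page range and sweep up through
	 * the top of the dtlb, covering every slot the restore loop may
	 * have locked for buf pages and dst kpages
	 */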
	for (; dtlb_index < cb_dents; dtlb_index++) {
		get_dtlb_entry(dtlb_index, &vaddr, &tte);
		if (TTE_IS_LOCKED(&tte)) {
			tte.ll = 0;
			set_dtlb_entry(dtlb_index, (caddr_t)0, &tte);
			CB_VPRINTF(("    cleared dtlb entry %x\n", dtlb_index));
		}
	}
}


/*
 * before calling this routine, all cprboot phys pages
 * are isolated from kernel pages; now we can restore
 * kpages from the statefile buffer
 */
int
cb_restore_kpages(void)
{
	int npages, compressed, regular;
	cpd_t desc;
	char *str;

	str = "cb_restore_kpages";
	CB_VPRINTF((ent_fmt, str, entry));

	CPR_DEBUG(CPR_DEBUG1, "%s: restoring kpages... ", prog);
	npages = compressed = regular = 0;
	while (npages < sfile.kpages) {
		get_phys_data(&desc, sizeof (desc));
		if (desc.cpd_magic != CPR_PAGE_MAGIC) {
			prom_printf("\nbad page magic 0x%x, expect 0x%x\n",
			    desc.cpd_magic, CPR_PAGE_MAGIC);
			return (ERR);
		}
		if (restore_page_group(&desc))
			return (ERR);
		npages += desc.cpd_pages;

		if (desc.cpd_flag & CPD_COMPRESS)
			compressed += desc.cpd_pages;
		else
			regular += desc.cpd_pages;

		/*
		 * display a spin char for every 32 page groups
		 * (a full spin <= one MB restored)
		 */
		if ((sfile.ngroups++ & 0x1f) == 0)
			cb_spin();
	}
	CPR_DEBUG(CPR_DEBUG1, " \b\n");

	dtlb_cleanup();

	if (verbose) {
		prom_printf("\npage stats: total %d, outside %d, "
		    "move %d, precede %d\n", sfile.kpages, sfile.outside,
		    sfile.move, sfile.precede);
		prom_printf("page stats: ngroups %d, recycle %d\n",
		    sfile.ngroups, sfile.recycle);
	}

	CPR_DEBUG(CPR_DEBUG4,
	    "%s: total=%d, npages=%d, compressed=%d, regular=%d\n",
	    str, sfile.kpages, npages, compressed, regular);

	/*
	 * sanity check
	 */
	if (npages != sfile.kpages) {
		prom_printf("\n%s: page count mismatch, expect %d, got %d\n",
		    str, sfile.kpages, npages);
		return (ERR);
	}

	return (0);
}


/*
 * check and update the statefile terminator;
 * on exit there will be a leftover tlb entry,
 * but it will soon get replaced by restore_tlb()
 */
int
cb_terminator(void)
{
	ctrm_t cterm;

	CB_VENTRY(cb_terminator);
	get_phys_data(&cterm, sizeof (cterm));
	if (cterm.magic != CPR_TERM_MAGIC) {
		prom_printf("\nbad term magic 0x%x, expect 0x%x\n",
		    cterm.magic, CPR_TERM_MAGIC);
		return (ERR);
	}
	cterm.tm_cprboot_start.tv_sec = cb_msec / 1000;
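	/*
	 * cb_msec appears to be cprboot's elapsed milliseconds; record
	 * it as whole seconds, then map in the terminator's kernel page
	 * and update the terminator in place
	 */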
	cb_mapin((caddr_t)CB_DST_VIRT, cterm.pfn,
	    TTE8K, TTE_HWWR_INT, cb_dents - 1);
	cpr_update_terminator(&cterm, (caddr_t)CB_DST_VIRT);
	return (0);
}