• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/powerpc/platforms/iseries/
1/*
2 * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
3 * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
4 *
5 * This modules exists as an interface between a Linux secondary partition
6 * running on an iSeries and the primary partition's Virtual Service
7 * Processor (VSP) object.  The VSP has final authority over powering on/off
8 * all partitions in the iSeries.  It also provides miscellaneous low-level
9 * machine facility type operations.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/completion.h>
32#include <linux/delay.h>
33#include <linux/proc_fs.h>
34#include <linux/dma-mapping.h>
35#include <linux/bcd.h>
36#include <linux/rtc.h>
37#include <linux/slab.h>
38
39#include <asm/time.h>
40#include <asm/uaccess.h>
41#include <asm/paca.h>
42#include <asm/abs_addr.h>
43#include <asm/firmware.h>
44#include <asm/iseries/mf.h>
45#include <asm/iseries/hv_lp_config.h>
46#include <asm/iseries/hv_lp_event.h>
47#include <asm/iseries/it_lp_queue.h>
48
49#include "setup.h"
50
51static int mf_initialized;
52
/*
 * This is the structure layout for the Machine Facilities LPAR event
 * flows.
 */
struct vsp_cmd_data {
	u64 token;		/* correlation cookie, echoed back in the ack */
	u16 cmd;		/* VSP command number */
	HvLpIndex lp_index;	/* requesting partition index */
	u8 result_code;		/* VSP status: 0 on success */
	u32 reserved;
	union {
		u64 state;	/* GetStateOut */
		u64 ipl_type;	/* GetIplTypeOut, Function02SelectIplTypeIn */
		u64 ipl_mode;	/* GetIplModeOut, Function02SelectIplModeIn */
		u64 page[4];	/* GetSrcHistoryIn */
		u64 flag;	/* GetAutoIplWhenPrimaryIplsOut,
				   SetAutoIplWhenPrimaryIplsIn,
				   WhiteButtonPowerOffIn,
				   Function08FastPowerOffIn,
				   IsSpcnRackPowerIncompleteOut */
		struct {
			u64 token;		/* DMA address of the data buffer */
			u64 address_type;	/* HvLpDma_AddressType_* */
			u64 side;		/* IPL side index (0..3 = 'A'..'D') */
			u32 length;
			u32 offset;
		} kern;		/* SetKernelImageIn, GetKernelImageIn,
				   SetKernelCmdLineIn, GetKernelCmdLineIn */
		u32 length_out;	/* GetKernelImageOut, GetKernelCmdLineOut */
		u8 reserved[80];
	} sub_data;
};
85
/* Tracks one in-flight VSP command while its sender sleeps. */
struct vsp_rsp_data {
	struct completion com;		/* completed by handle_ack (subtype 6) */
	struct vsp_cmd_data *response;	/* where to copy the VSP's reply */
};

/* Body of an LP-event (de)allocation request (subtypes 4 and 5). */
struct alloc_data {
	u16 size;
	u16 type;
	u32 count;
	u16 reserved1;
	u8 reserved2;
	HvLpIndex target_lp;
};

struct ce_msg_data;

/* Callback invoked when a CE message receives its answer. */
typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);

struct ce_msg_comp_data {
	ce_msg_comp_hdlr handler;	/* may be NULL */
	void *token;			/* passed back to handler verbatim */
};

/* A 12-byte CE message plus an optional completion callback. */
struct ce_msg_data {
	u8 ce_msg[12];
	char reserved[4];
	struct ce_msg_comp_data *completion;
};

/* On-the-wire layout of a Machine Facilities LP event. */
struct io_mf_lp_event {
	struct HvLpEvent hp_lp_event;
	u16 subtype_result_code;
	u16 reserved1;
	u32 reserved2;
	union {
		struct alloc_data alloc;	/* subtypes 4/5 */
		struct ce_msg_data ce_msg;	/* subtype 0 */
		struct vsp_cmd_data vsp_cmd;	/* subtype 6 */
	} data;
};
126
/* Pack four byte values into the 32-bit LP event subtype-data word. */
#define subtype_data(a, b, c, d)	\
		(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
129
130/*
131 * All outgoing event traffic is kept on a FIFO queue.  The first
132 * pointer points to the one that is outstanding, and all new
133 * requests get stuck on the end.  Also, we keep a certain number of
134 * preallocated pending events so that we can operate very early in
135 * the boot up sequence (before kmalloc is ready).
136 */
struct pending_event {
	struct pending_event *next;	/* FIFO link */
	struct io_mf_lp_event event;	/* the LP event to signal */
	MFCompleteHandler hdlr;		/* optional completion callback */
	char dma_data[72];		/* data DMAed to the SP before signalling */
	unsigned dma_data_length;	/* 0 means no DMA phase */
	unsigned remote_address;	/* SP-side DMA target address */
};
static spinlock_t pending_event_spinlock;	/* guards the lists below */
static struct pending_event *pending_event_head;	/* oldest (in-flight) event */
static struct pending_event *pending_event_tail;	/* newest queued event */
static struct pending_event *pending_event_avail;	/* free list */
#define PENDING_EVENT_PREALLOC_LEN 16
/* static pool usable before kmalloc works (seeded in mf_init) */
static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN];
151
152/*
153 * Put a pending event onto the available queue, so it can get reused.
154 * Attention! You must have the pending_event_spinlock before calling!
155 */
156static void free_pending_event(struct pending_event *ev)
157{
158	if (ev != NULL) {
159		ev->next = pending_event_avail;
160		pending_event_avail = ev;
161	}
162}
163
164/*
165 * Enqueue the outbound event onto the stack.  If the queue was
166 * empty to begin with, we must also issue it via the Hypervisor
167 * interface.  There is a section of code below that will touch
168 * the first stack pointer without the protection of the pending_event_spinlock.
169 * This is OK, because we know that nobody else will be modifying
170 * the first pointer when we do this.
171 */
static int signal_event(struct pending_event *ev)
{
	int rc = 0;
	unsigned long flags;
	int go = 1;		/* cleared once an event is known to be in flight */
	struct pending_event *ev1;
	HvLpEvent_Rc hv_rc;

	/* enqueue the event */
	if (ev != NULL) {
		ev->next = NULL;
		spin_lock_irqsave(&pending_event_spinlock, flags);
		if (pending_event_head == NULL)
			pending_event_head = ev;
		else {
			/* queue not empty: an earlier event is already in
			 * flight, so don't signal this one yet */
			go = 0;
			pending_event_tail->next = ev;
		}
		pending_event_tail = ev;
		spin_unlock_irqrestore(&pending_event_spinlock, flags);
	}

	/* send the event */
	while (go) {
		go = 0;

		/* any DMA data to send beforehand? */
		if (pending_event_head->dma_data_length > 0)
			HvCallEvent_dmaToSp(pending_event_head->dma_data,
					pending_event_head->remote_address,
					pending_event_head->dma_data_length,
					HvLpDma_Direction_LocalToRemote);

		hv_rc = HvCallEvent_signalLpEvent(
				&pending_event_head->event.hp_lp_event);
		if (hv_rc != HvLpEvent_Rc_Good) {
			printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
					"failed with %d\n", (int)hv_rc);

			/* drop the failed head; loop again if another
			 * event is waiting behind it */
			spin_lock_irqsave(&pending_event_spinlock, flags);
			ev1 = pending_event_head;
			pending_event_head = pending_event_head->next;
			if (pending_event_head != NULL)
				go = 1;
			spin_unlock_irqrestore(&pending_event_spinlock, flags);

			/* report failure: via return code for our own event,
			 * via the completion handler for an older one */
			if (ev1 == ev)
				rc = -EIO;
			else if (ev1->hdlr != NULL)
				(*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);

			spin_lock_irqsave(&pending_event_spinlock, flags);
			free_pending_event(ev1);
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
		}
	}

	return rc;
}
231
232/*
233 * Allocate a new pending_event structure, and initialize it.
234 */
235static struct pending_event *new_pending_event(void)
236{
237	struct pending_event *ev = NULL;
238	HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
239	unsigned long flags;
240	struct HvLpEvent *hev;
241
242	spin_lock_irqsave(&pending_event_spinlock, flags);
243	if (pending_event_avail != NULL) {
244		ev = pending_event_avail;
245		pending_event_avail = pending_event_avail->next;
246	}
247	spin_unlock_irqrestore(&pending_event_spinlock, flags);
248	if (ev == NULL) {
249		ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
250		if (ev == NULL) {
251			printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
252					sizeof(struct pending_event));
253			return NULL;
254		}
255	}
256	memset(ev, 0, sizeof(struct pending_event));
257	hev = &ev->event.hp_lp_event;
258	hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
259	hev->xType = HvLpEvent_Type_MachineFac;
260	hev->xSourceLp = HvLpConfig_getLpIndex();
261	hev->xTargetLp = primary_lp;
262	hev->xSizeMinus1 = sizeof(ev->event) - 1;
263	hev->xRc = HvLpEvent_Rc_Good;
264	hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
265			HvLpEvent_Type_MachineFac);
266	hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
267			HvLpEvent_Type_MachineFac);
268
269	return ev;
270}
271
/*
 * Send a VSP command (subtype 6, "MFVI") to the primary partition and
 * wait for the reply.  handle_ack (case 6) copies the VSP's answer back
 * into *vsp_cmd and completes response.com.  Returns 0 on success
 * (result then in vsp_cmd->result_code), negative errno otherwise.
 * Sleeps, so must not be called from atomic context.
 */
static int __maybe_unused
signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
{
	struct pending_event *ev = new_pending_event();
	int rc;
	struct vsp_rsp_data response;

	if (ev == NULL)
		return -ENOMEM;

	init_completion(&response.com);
	response.response = vsp_cmd;
	ev->event.hp_lp_event.xSubtype = 6;
	ev->event.hp_lp_event.x.xSubtypeData =
		subtype_data('M', 'F',  'V',  'I');
	/* token lets handle_ack locate this on-stack response block */
	ev->event.data.vsp_cmd.token = (u64)&response;
	ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
	ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
	ev->event.data.vsp_cmd.result_code = 0xFF;	/* poison until VSP fills it */
	ev->event.data.vsp_cmd.reserved = 0;
	memcpy(&(ev->event.data.vsp_cmd.sub_data),
			&(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
	mb();	/* event fully built before it is signalled */

	rc = signal_event(ev);
	if (rc == 0)
		wait_for_completion(&response.com);
	return rc;
}
301
302
303/*
304 * Send a 12-byte CE message to the primary partition VSP object
305 */
306static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
307{
308	struct pending_event *ev = new_pending_event();
309
310	if (ev == NULL)
311		return -ENOMEM;
312
313	ev->event.hp_lp_event.xSubtype = 0;
314	ev->event.hp_lp_event.x.xSubtypeData =
315		subtype_data('M',  'F',  'C',  'E');
316	memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
317	ev->event.data.ce_msg.completion = completion;
318	return signal_event(ev);
319}
320
321/*
322 * Send a 12-byte CE message (with no data) to the primary partition VSP object
323 */
324static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
325{
326	u8 ce_msg[12];
327
328	memset(ce_msg, 0, sizeof(ce_msg));
329	ce_msg[3] = ce_op;
330	return signal_ce_msg(ce_msg, completion);
331}
332
333/*
334 * Send a 12-byte CE message and DMA data to the primary partition VSP object
335 */
336static int dma_and_signal_ce_msg(char *ce_msg,
337		struct ce_msg_comp_data *completion, void *dma_data,
338		unsigned dma_data_length, unsigned remote_address)
339{
340	struct pending_event *ev = new_pending_event();
341
342	if (ev == NULL)
343		return -ENOMEM;
344
345	ev->event.hp_lp_event.xSubtype = 0;
346	ev->event.hp_lp_event.x.xSubtypeData =
347		subtype_data('M', 'F', 'C', 'E');
348	memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
349	ev->event.data.ce_msg.completion = completion;
350	memcpy(ev->dma_data, dma_data, dma_data_length);
351	ev->dma_data_length = dma_data_length;
352	ev->remote_address = remote_address;
353	return signal_event(ev);
354}
355
356/*
357 * Initiate a nice (hopefully) shutdown of Linux.  We simply are
358 * going to try and send the init process a SIGINT signal.  If
359 * this fails (why?), we'll simply force it off in a not-so-nice
360 * manner.
361 */
362static int shutdown(void)
363{
364	int rc = kill_cad_pid(SIGINT, 1);
365
366	if (rc) {
367		printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
368				"hard shutdown commencing\n", rc);
369		mf_power_off();
370	} else
371		printk(KERN_INFO "mf.c: init has been successfully notified "
372				"to proceed with shutdown\n");
373	return rc;
374}
375
376/*
377 * The primary partition VSP object is sending us a new
378 * event flow.  Handle it...
379 */
static void handle_int(struct io_mf_lp_event *event)
{
	struct ce_msg_data *ce_msg_data;
	struct ce_msg_data *pce_msg_data;
	unsigned long flags;
	struct pending_event *pev;

	/* ack the interrupt */
	event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
	HvCallEvent_ackLpEvent(&event->hp_lp_event);

	/* process interrupt */
	switch (event->hp_lp_event.xSubtype) {
	case 0:	/* CE message */
		ce_msg_data = &event->data.ce_msg;
		switch (ce_msg_data->ce_msg[3]) {
		case 0x5B:	/* power control notification */
			if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
				printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
				if (shutdown() == 0)
					signal_ce_msg_simple(0xDB, NULL);
			}
			break;
		case 0xC0:	/* get time */
			/* pop the queued request this reply belongs to and
			 * run its completion handler */
			spin_lock_irqsave(&pending_event_spinlock, flags);
			pev = pending_event_head;
			if (pev != NULL)
				pending_event_head = pending_event_head->next;
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
			if (pev == NULL)
				break;
			pce_msg_data = &pev->event.data.ce_msg;
			/* NOTE(review): if the popped event is not a 0x40
			 * (get-time) request it is neither re-queued nor
			 * freed here — looks like a leak; verify */
			if (pce_msg_data->ce_msg[3] != 0x40)
				break;
			if (pce_msg_data->completion != NULL) {
				ce_msg_comp_hdlr handler =
					pce_msg_data->completion->handler;
				void *token = pce_msg_data->completion->token;

				if (handler != NULL)
					(*handler)(token, ce_msg_data);
			}
			spin_lock_irqsave(&pending_event_spinlock, flags);
			free_pending_event(pev);
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
			/* send next waiting event */
			if (pending_event_head != NULL)
				signal_event(NULL);
			break;
		}
		break;
	case 1:	/* IT sys shutdown */
		printk(KERN_INFO "mf.c: Commencing system shutdown\n");
		shutdown();
		break;
	}
}
437
438/*
439 * The primary partition VSP object is acknowledging the receipt
440 * of a flow we sent to them.  If there are other flows queued
441 * up, we must send another one now...
442 */
static void handle_ack(struct io_mf_lp_event *event)
{
	unsigned long flags;
	struct pending_event *two = NULL;
	unsigned long free_it = 0;	/* set when the queue head is done */
	struct ce_msg_data *ce_msg_data;
	struct ce_msg_data *pce_msg_data;
	struct vsp_rsp_data *rsp;

	/* handle current event */
	if (pending_event_head == NULL) {
		printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
		return;
	}

	switch (event->hp_lp_event.xSubtype) {
	case 0:     /* CE msg */
		ce_msg_data = &event->data.ce_msg;
		if (ce_msg_data->ce_msg[3] != 0x40) {
			free_it = 1;
			break;
		}
		/* a get-time (0x40) ack with byte 2 == 0 is only an
		 * intermediate ack; the real reply arrives later as an
		 * interrupt (handle_int, 0xC0) — keep the request queued */
		if (ce_msg_data->ce_msg[2] == 0)
			break;
		free_it = 1;
		pce_msg_data = &pending_event_head->event.data.ce_msg;
		if (pce_msg_data->completion != NULL) {
			ce_msg_comp_hdlr handler =
				pce_msg_data->completion->handler;
			void *token = pce_msg_data->completion->token;

			if (handler != NULL)
				(*handler)(token, ce_msg_data);
		}
		break;
	case 4:	/* allocate */
	case 5:	/* deallocate */
		if (pending_event_head->hdlr != NULL)
			(*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
		free_it = 1;
		break;
	case 6:
		/* VSP command response: copy the reply back and wake the
		 * waiter in signal_vsp_instruction() */
		free_it = 1;
		rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
		if (rsp == NULL) {
			printk(KERN_ERR "mf.c: no rsp\n");
			break;
		}
		if (rsp->response != NULL)
			memcpy(rsp->response, &event->data.vsp_cmd,
					sizeof(event->data.vsp_cmd));
		complete(&rsp->com);
		break;
	}

	/* remove from queue */
	spin_lock_irqsave(&pending_event_spinlock, flags);
	if ((pending_event_head != NULL) && (free_it == 1)) {
		struct pending_event *oldHead = pending_event_head;

		pending_event_head = pending_event_head->next;
		two = pending_event_head;
		free_pending_event(oldHead);
	}
	spin_unlock_irqrestore(&pending_event_spinlock, flags);

	/* send next waiting event */
	if (two != NULL)
		signal_event(NULL);
}
513
514/*
515 * This is the generic event handler we are registering with
516 * the Hypervisor.  Ensure the flows are for us, and then
517 * parse it enough to know if it is an interrupt or an
518 * acknowledge.
519 */
520static void hv_handler(struct HvLpEvent *event)
521{
522	if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
523		if (hvlpevent_is_ack(event))
524			handle_ack((struct io_mf_lp_event *)event);
525		else
526			handle_int((struct io_mf_lp_event *)event);
527	} else
528		printk(KERN_ERR "mf.c: alien event received\n");
529}
530
531/*
532 * Global kernel interface to allocate and seed events into the
533 * Hypervisor.
534 */
535void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
536		unsigned size, unsigned count, MFCompleteHandler hdlr,
537		void *user_token)
538{
539	struct pending_event *ev = new_pending_event();
540	int rc;
541
542	if (ev == NULL) {
543		rc = -ENOMEM;
544	} else {
545		ev->event.hp_lp_event.xSubtype = 4;
546		ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
547		ev->event.hp_lp_event.x.xSubtypeData =
548			subtype_data('M', 'F', 'M', 'A');
549		ev->event.data.alloc.target_lp = target_lp;
550		ev->event.data.alloc.type = type;
551		ev->event.data.alloc.size = size;
552		ev->event.data.alloc.count = count;
553		ev->hdlr = hdlr;
554		rc = signal_event(ev);
555	}
556	if ((rc != 0) && (hdlr != NULL))
557		(*hdlr)(user_token, rc);
558}
559EXPORT_SYMBOL(mf_allocate_lp_events);
560
561/*
562 * Global kernel interface to unseed and deallocate events already in
563 * Hypervisor.
564 */
565void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
566		unsigned count, MFCompleteHandler hdlr, void *user_token)
567{
568	struct pending_event *ev = new_pending_event();
569	int rc;
570
571	if (ev == NULL)
572		rc = -ENOMEM;
573	else {
574		ev->event.hp_lp_event.xSubtype = 5;
575		ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
576		ev->event.hp_lp_event.x.xSubtypeData =
577			subtype_data('M', 'F', 'M', 'D');
578		ev->event.data.alloc.target_lp = target_lp;
579		ev->event.data.alloc.type = type;
580		ev->event.data.alloc.count = count;
581		ev->hdlr = hdlr;
582		rc = signal_event(ev);
583	}
584	if ((rc != 0) && (hdlr != NULL))
585		(*hdlr)(user_token, rc);
586}
587EXPORT_SYMBOL(mf_deallocate_lp_events);
588
589/*
590 * Global kernel interface to tell the VSP object in the primary
591 * partition to power this partition off.
592 */
593void mf_power_off(void)
594{
595	printk(KERN_INFO "mf.c: Down it goes...\n");
596	signal_ce_msg_simple(0x4d, NULL);
597	for (;;)
598		;
599}
600
601/*
602 * Global kernel interface to tell the VSP object in the primary
603 * partition to reboot this partition.
604 */
605void mf_reboot(char *cmd)
606{
607	printk(KERN_INFO "mf.c: Preparing to bounce...\n");
608	signal_ce_msg_simple(0x4e, NULL);
609	for (;;)
610		;
611}
612
613/*
614 * Display a single word SRC onto the VSP control panel.
615 */
616void mf_display_src(u32 word)
617{
618	u8 ce[12];
619
620	memset(ce, 0, sizeof(ce));
621	ce[3] = 0x4a;
622	ce[7] = 0x01;
623	ce[8] = word >> 24;
624	ce[9] = word >> 16;
625	ce[10] = word >> 8;
626	ce[11] = word;
627	signal_ce_msg(ce, NULL);
628}
629
630/*
631 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
632 */
static __init void mf_display_progress_src(u16 value)
{
	u8 ce[12];
	u8 src[72];

	/* canned CE message (op 0x4A) plus a 72-byte SRC template that is
	 * DMAed to the service processor as the panel text */
	memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
	memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00PROGxxxx                        ",
		72);
	/* binary copy of the progress value */
	src[6] = value >> 8;
	src[7] = value & 255;
	/* overwrite the "xxxx" placeholder with the value in ASCII hex */
	src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
	src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
	src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
	src[47] = "0123456789ABCDEF"[value & 15];
	dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
}
652
653/*
654 * Clear the VSP control panel.  Used to "erase" an SRC that was
655 * previously displayed.
656 */
static void mf_clear_src(void)
{
	/* CE op 0x4b: clear the currently displayed SRC */
	signal_ce_msg_simple(0x4b, NULL);
}
661
662void __init mf_display_progress(u16 value)
663{
664	if (!mf_initialized)
665		return;
666
667	if (0xFFFF == value)
668		mf_clear_src();
669	else
670		mf_display_progress_src(value);
671}
672
673/*
674 * Initialization code here.
675 */
void __init mf_init(void)
{
	int i;

	spin_lock_init(&pending_event_spinlock);

	/* seed the free list with the static pool so events can be sent
	 * before kmalloc is usable */
	for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++)
		free_pending_event(&pending_event_prealloc[i]);

	HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);

	/* virtual continue ack */
	signal_ce_msg_simple(0x57, NULL);

	mf_initialized = 1;
	mb();	/* flag must be visible before the init message prints */

	printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
			"initialized\n");
}
696
/* Context for a sleeping get-RTC request (CE op 0x40). */
struct rtc_time_data {
	struct completion com;		/* completed when the reply arrives */
	struct ce_msg_data ce_msg;	/* copy of the VSP's reply */
	int rc;				/* 0 once the reply was received */
};
702
703static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
704{
705	struct rtc_time_data *rtc = token;
706
707	memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
708	rtc->rc = 0;
709	complete(&rtc->com);
710}
711
712static int mf_set_rtc(struct rtc_time *tm)
713{
714	char ce_time[12];
715	u8 day, mon, hour, min, sec, y1, y2;
716	unsigned year;
717
718	year = 1900 + tm->tm_year;
719	y1 = year / 100;
720	y2 = year % 100;
721
722	sec = tm->tm_sec;
723	min = tm->tm_min;
724	hour = tm->tm_hour;
725	day = tm->tm_mday;
726	mon = tm->tm_mon + 1;
727
728	sec = bin2bcd(sec);
729	min = bin2bcd(min);
730	hour = bin2bcd(hour);
731	mon = bin2bcd(mon);
732	day = bin2bcd(day);
733	y1 = bin2bcd(y1);
734	y2 = bin2bcd(y2);
735
736	memset(ce_time, 0, sizeof(ce_time));
737	ce_time[3] = 0x41;
738	ce_time[4] = y1;
739	ce_time[5] = y2;
740	ce_time[6] = sec;
741	ce_time[7] = min;
742	ce_time[8] = hour;
743	ce_time[10] = day;
744	ce_time[11] = mon;
745
746	return signal_ce_msg(ce_time, NULL);
747}
748
/*
 * Decode a BCD time-of-day reply from the VSP (ce_msg) into *tm.
 * On error (rc != 0) a fixed fallback date is stored and rc returned.
 * If the VSP reports its TOD clock unset (status 0xa9/0xaf), a default
 * date is written back to the VSP via mf_set_rtc().
 */
static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
{
	tm->tm_wday = 0;
	tm->tm_yday = 0;
	tm->tm_isdst = 0;
	if (rc) {
		/* request failed: hand back a fixed fallback date */
		tm->tm_sec = 0;
		tm->tm_min = 0;
		tm->tm_hour = 0;
		tm->tm_mday = 15;
		tm->tm_mon = 5;
		tm->tm_year = 52;
		return rc;
	}

	if ((ce_msg[2] == 0xa9) ||
	    (ce_msg[2] == 0xaf)) {
		/* TOD clock is not set */
		tm->tm_sec = 1;
		tm->tm_min = 1;
		tm->tm_hour = 1;
		tm->tm_mday = 10;
		tm->tm_mon = 8;
		tm->tm_year = 71;
		mf_set_rtc(tm);
		/* NOTE(review): control falls through and the defaults just
		 * written are overwritten by the decode below — verify that
		 * this is intended */
	}
	{
		/* decode the BCD fields of the reply */
		u8 year = ce_msg[5];
		u8 sec = ce_msg[6];
		u8 min = ce_msg[7];
		u8 hour = ce_msg[8];
		u8 day = ce_msg[10];
		u8 mon = ce_msg[11];

		sec = bcd2bin(sec);
		min = bcd2bin(min);
		hour = bcd2bin(hour);
		day = bcd2bin(day);
		mon = bcd2bin(mon);
		year = bcd2bin(year);

		/* two-digit year: 00-69 means 20xx (tm_year is since 1900) */
		if (year <= 69)
			year += 100;

		tm->tm_sec = sec;
		tm->tm_min = min;
		tm->tm_hour = hour;
		tm->tm_mday = day;
		tm->tm_mon = mon;
		tm->tm_year = year;
	}

	return 0;
}
803
804static int mf_get_rtc(struct rtc_time *tm)
805{
806	struct ce_msg_comp_data ce_complete;
807	struct rtc_time_data rtc_data;
808	int rc;
809
810	memset(&ce_complete, 0, sizeof(ce_complete));
811	memset(&rtc_data, 0, sizeof(rtc_data));
812	init_completion(&rtc_data.com);
813	ce_complete.handler = &get_rtc_time_complete;
814	ce_complete.token = &rtc_data;
815	rc = signal_ce_msg_simple(0x40, &ce_complete);
816	if (rc)
817		return rc;
818	wait_for_completion(&rtc_data.com);
819	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
820}
821
/* Context for a polled (boot-time) get-RTC request. */
struct boot_rtc_time_data {
	int busy;			/* cleared when the reply arrives */
	struct ce_msg_data ce_msg;	/* copy of the VSP's reply */
	int rc;				/* 0 once the reply was received */
};
827
828static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
829{
830	struct boot_rtc_time_data *rtc = token;
831
832	memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
833	rtc->rc = 0;
834	rtc->busy = 0;
835}
836
/*
 * Boot-time variant of mf_get_rtc(): polls for the reply instead of
 * sleeping, since LP-event interrupts are not yet being taken this early.
 */
static int mf_get_boot_rtc(struct rtc_time *tm)
{
	struct ce_msg_comp_data ce_complete;
	struct boot_rtc_time_data rtc_data;
	int rc;

	memset(&ce_complete, 0, sizeof(ce_complete));
	memset(&rtc_data, 0, sizeof(rtc_data));
	rtc_data.busy = 1;
	ce_complete.handler = &get_boot_rtc_time_complete;
	ce_complete.token = &rtc_data;
	rc = signal_ce_msg_simple(0x40, &ce_complete);
	if (rc)
		return rc;
	/* We need to poll here as we are not yet taking interrupts */
	while (rtc_data.busy) {
		if (hvlpevent_is_pending())
			process_hvlpevents();
	}
	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
858
859#ifdef CONFIG_PROC_FS
/*
 * Read handler for /proc/iSeries/mf/<side>/cmdline: fetches that side's
 * saved kernel command line from the VSP (command 33) via a DMA buffer.
 */
static int mf_cmdline_proc_show(struct seq_file *m, void *v)
{
	char *page, *p;
	struct vsp_cmd_data vsp_cmd;
	int rc;
	dma_addr_t dma_addr;

	/* The HV appears to return no more than 256 bytes of command line */
	page = kmalloc(256, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
	if (dma_addr == DMA_ERROR_CODE) {
		kfree(page);
		return -ENOMEM;
	}
	memset(page, 0, 256);
	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 33;
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = (u64)m->private;	/* IPL side 0..3 */
	vsp_cmd.sub_data.kern.length = 256;
	mb();	/* command fully built before it is signalled */
	rc = signal_vsp_instruction(&vsp_cmd);
	iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
	if (rc) {
		kfree(page);
		return rc;
	}
	if (vsp_cmd.result_code != 0) {
		kfree(page);
		return -ENOMEM;
	}
	p = page;
	/* emit the text up to (not including) the first NUL or newline */
	while (p - page < 256) {
		if (*p == '\0' || *p == '\n') {
			*p = '\n';
			break;
		}
		p++;

	}
	seq_write(m, page, p - page);
	kfree(page);
	return 0;
}
908
static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
{
	/* hand the per-side index (stored as PDE data) to the show routine */
	return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
}
913
914
915static int mf_side_proc_show(struct seq_file *m, void *v)
916{
917	char mf_current_side = ' ';
918	struct vsp_cmd_data vsp_cmd;
919
920	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
921	vsp_cmd.cmd = 2;
922	vsp_cmd.sub_data.ipl_type = 0;
923	mb();
924
925	if (signal_vsp_instruction(&vsp_cmd) == 0) {
926		if (vsp_cmd.result_code == 0) {
927			switch (vsp_cmd.sub_data.ipl_type) {
928			case 0:	mf_current_side = 'A';
929				break;
930			case 1:	mf_current_side = 'B';
931				break;
932			case 2:	mf_current_side = 'C';
933				break;
934			default:	mf_current_side = 'D';
935				break;
936			}
937		}
938	}
939
940	seq_printf(m, "%c\n", mf_current_side);
941	return 0;
942}
943
static int mf_side_proc_open(struct inode *inode, struct file *file)
{
	/* no per-file state needed for the side file */
	return single_open(file, mf_side_proc_show, NULL);
}
948
949static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
950				  size_t count, loff_t *pos)
951{
952	char side;
953	u64 newSide;
954	struct vsp_cmd_data vsp_cmd;
955
956	if (!capable(CAP_SYS_ADMIN))
957		return -EACCES;
958
959	if (count == 0)
960		return 0;
961
962	if (get_user(side, buffer))
963		return -EFAULT;
964
965	switch (side) {
966	case 'A':	newSide = 0;
967			break;
968	case 'B':	newSide = 1;
969			break;
970	case 'C':	newSide = 2;
971			break;
972	case 'D':	newSide = 3;
973			break;
974	default:
975		printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
976		return -EINVAL;
977	}
978
979	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
980	vsp_cmd.sub_data.ipl_type = newSide;
981	vsp_cmd.cmd = 10;
982
983	(void)signal_vsp_instruction(&vsp_cmd);
984
985	return count;
986}
987
/* file_operations for /proc/iSeries/mf/side */
static const struct file_operations mf_side_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mf_side_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= mf_side_proc_write,
};
996
997
static int mf_src_proc_show(struct seq_file *m, void *v)
{
	/* the src file is effectively write-only; reads yield nothing */
	return 0;
}
1002
static int mf_src_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mf_src_proc_show, NULL);
}
1007
1008static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
1009				 size_t count, loff_t *pos)
1010{
1011	char stkbuf[10];
1012
1013	if (!capable(CAP_SYS_ADMIN))
1014		return -EACCES;
1015
1016	if ((count < 4) && (count != 1)) {
1017		printk(KERN_ERR "mf_proc: invalid src\n");
1018		return -EINVAL;
1019	}
1020
1021	if (count > (sizeof(stkbuf) - 1))
1022		count = sizeof(stkbuf) - 1;
1023	if (copy_from_user(stkbuf, buffer, count))
1024		return -EFAULT;
1025
1026	if ((count == 1) && (*stkbuf == '\0'))
1027		mf_clear_src();
1028	else
1029		mf_display_src(*(u32 *)stkbuf);
1030
1031	return count;
1032}
1033
/* file_operations for /proc/iSeries/mf/src */
static const struct file_operations mf_src_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mf_src_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= mf_src_proc_write,
};
1042
/*
 * Write handler for /proc/iSeries/mf/<side>/cmdline: sends the written
 * bytes to the VSP (command 31) as the saved kernel command line for
 * that side.  NOTE(review): count is not bounded before the HV
 * allocation — verify that oversized writes are limited elsewhere.
 */
static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
				     size_t count, loff_t *pos)
{
	void *data = PDE(file->f_path.dentry->d_inode)->data;	/* IPL side index */
	struct vsp_cmd_data vsp_cmd;
	dma_addr_t dma_addr;
	char *page;
	int ret = -EACCES;

	if (!capable(CAP_SYS_ADMIN))
		goto out;

	dma_addr = 0;
	page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
	ret = -ENOMEM;
	if (page == NULL)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(page, buffer, count))
		goto out_free;

	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 31;
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = (u64)data;
	vsp_cmd.sub_data.kern.length = count;
	mb();	/* command fully built before it is signalled */
	/* the result code is deliberately ignored */
	(void)signal_vsp_instruction(&vsp_cmd);
	ret = count;

out_free:
	iseries_hv_free(count, page, dma_addr);
out:
	return ret;
}
1080
/* file_operations for /proc/iSeries/mf/<side>/cmdline */
static const struct file_operations mf_cmdline_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mf_cmdline_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= mf_cmdline_proc_write,
};
1089
/*
 * Write handler for /proc/iSeries/mf/<side>/vmlinux: DMAs a chunk of a
 * kernel image to the VSP (command 30) at offset *ppos for that side.
 */
static ssize_t proc_mf_change_vmlinux(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	ssize_t rc;
	dma_addr_t dma_addr;
	char *page;
	struct vsp_cmd_data vsp_cmd;

	rc = -EACCES;
	if (!capable(CAP_SYS_ADMIN))
		goto out;

	dma_addr = 0;
	page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
	rc = -ENOMEM;
	if (page == NULL) {
		printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
		goto out;
	}
	rc = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out_free;

	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 30;
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = (u64)dp->data;	/* IPL side index */
	vsp_cmd.sub_data.kern.offset = *ppos;
	vsp_cmd.sub_data.kern.length = count;
	mb();	/* command fully built before it is signalled */
	rc = signal_vsp_instruction(&vsp_cmd);
	if (rc)
		goto out_free;
	rc = -ENOMEM;
	if (vsp_cmd.result_code != 0)
		goto out_free;

	*ppos += count;
	rc = count;
out_free:
	iseries_hv_free(count, page, dma_addr);
out:
	return rc;
}
1137
/* file_operations for /proc/iSeries/mf/<side>/vmlinux (write-only) */
static const struct file_operations proc_vmlinux_operations = {
	.write		= proc_mf_change_vmlinux,
};
1141
/*
 * Create the /proc/iSeries/mf tree: per-side (A-D) cmdline and vmlinux
 * entries plus the global side and src files.  Returns non-zero on any
 * proc-creation failure.
 */
static int __init mf_proc_init(void)
{
	struct proc_dir_entry *mf_proc_root;
	struct proc_dir_entry *ent;
	struct proc_dir_entry *mf;
	char name[2];
	int i;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;

	mf_proc_root = proc_mkdir("iSeries/mf", NULL);
	if (!mf_proc_root)
		return 1;

	name[1] = '\0';
	for (i = 0; i < 4; i++) {
		/* one subdirectory per IPL side, named A..D */
		name[0] = 'A' + i;
		mf = proc_mkdir(name, mf_proc_root);
		if (!mf)
			return 1;

		/* the side index i is carried as the PDE's private data */
		ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
				       &mf_cmdline_proc_fops, (void *)(long)i);
		if (!ent)
			return 1;

		if (i == 3)	/* no vmlinux entry for 'D' */
			continue;

		ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
				       &proc_vmlinux_operations,
				       (void *)(long)i);
		if (!ent)
			return 1;
	}

	ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
			  &mf_side_proc_fops);
	if (!ent)
		return 1;

	ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
			  &mf_src_proc_fops);
	if (!ent)
		return 1;

	return 0;
}
1191
1192__initcall(mf_proc_init);
1193
1194#endif /* CONFIG_PROC_FS */
1195
1196/*
1197 * Get the RTC from the virtual service processor
1198 * This requires flowing LpEvents to the primary partition
1199 */
void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
{
	mf_get_rtc(rtc_tm);
	/* the VSP month is 1-based (see mf_set_rtc, which writes
	 * tm_mon + 1); struct rtc_time wants 0-based */
	rtc_tm->tm_mon--;
}
1205
1206/*
1207 * Set the RTC in the virtual service processor
1208 * This requires flowing LpEvents to the primary partition
1209 */
int iSeries_set_rtc_time(struct rtc_time *tm)
{
	/* any signalling failure from mf_set_rtc() is ignored */
	mf_set_rtc(tm);
	return 0;
}
1215
unsigned long iSeries_get_boot_time(void)
{
	struct rtc_time tm;

	mf_get_boot_rtc(&tm);
	/* tm_mon here is still 1-based (no decrement as in
	 * iSeries_get_rtc_time), matching mktime()'s expectation */
	return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
1224