// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c - Interface for accessing the crash
 *			   dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/*
 * List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Total size of all device dumps */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
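
/*
 * Usage sketch (illustrative, not part of this file): a driver that can
 * tell which old-kernel PFNs are not backed by dump data registers a
 * callback so that reads of those ranges return zeroes. foo_vmcore_cb
 * and foo_pfn_is_backed() are hypothetical; only struct vmcore_cb and
 * the register/unregister helpers above are real interfaces here.
 *
 *	static bool foo_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return foo_pfn_is_backed(pfn);
 *	}
 *
 *	static struct vmcore_cb foo_vmcore_cb = {
 *		.pfn_is_ram = foo_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&foo_vmcore_cb);	... on probe
 *	unregister_vmcore_cb(&foo_vmcore_cb);	... on remove
 */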

static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Read from the oldmem device, starting at the given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	ssize_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
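	/*
	 * Worked example (assuming 4 KiB pages): *ppos == 0x1234 splits
	 * into pfn == 1 and offset == 0x234; the first, partial page is
	 * copied below, then offset is reset to 0 and whole pages follow.
	 */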

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}
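
/*
 * A sketch of such an override (modeled on the x86 one, which maps the
 * old page with ioremap_encrypted() so that reads are decrypted on
 * access; treat this as an illustration under those assumptions, not a
 * drop-in implementation for any particular architecture):
 *
 *	ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter,
 *			unsigned long pfn, size_t csize, unsigned long offset)
 *	{
 *		void *vaddr;
 *
 *		vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT,
 *							  PAGE_SIZE);
 *		if (!vaddr)
 *			return -ENOMEM;
 *		csize = copy_to_iter(vaddr + offset, csize, iter);
 *		iounmap((void __iomem *)vaddr);
 *		return csize;
 *	}
 */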

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/*
 * Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/*
		 * We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely, and we would end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools would
		 * then try to decode this zero-filled data as valid notes,
		 * which we don't want. Hence, adding the device dumps before
		 * the other ELF notes ensures that no such zero-filled gap
		 * is created.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t)index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
			    unsigned long from, unsigned long pfn,
			    unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/*
		 * We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely, and we would end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools would
		 * then try to decode this zero-filled data as valid notes,
		 * which we don't want. Hence, adding the device dumps before
		 * the other ELF notes ensures that no such zero-filled gap
		 * is created. This also ensures that the device dumps and
		 * other ELF notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore *__init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
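
/*
 * Worked example of the per-note size computed above: a note with
 * n_namesz == 5 ("CORE\0") and n_descsz == 336 occupies
 * sizeof(Elf64_Nhdr) (12) + roundup(5, 4) (8) + roundup(336, 4) (336)
 * == 356 bytes; real_sz is the sum of these 4-byte-aligned entries.
 */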

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 4;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/*
	 * Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
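
/*
 * Resulting /proc/vmcore layout after merging (a sketch; the exact
 * boundaries are the page-aligned values computed above):
 *
 *	+----------------------------------------------+ 0
 *	| ELF header + program headers (elfcorebuf)    |
 *	+----------------------------------------------+ elfcorebuf_sz
 *	| merged ELF note segment (elfnotes_buf)       |
 *	+----------------------------------------------+ elfcorebuf_sz +
 *	| PT_LOAD memory chunks (vmcore_list, oldmem)  |  elfnotes_sz
 *	+----------------------------------------------+
 */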

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 4;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/*
	 * Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/*
 * Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
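
/*
 * The header written above forms a complete ELF note: n_namesz, n_descsz
 * and n_type are followed by the note owner name (VMCOREDD_NOTE_NAME,
 * "LINUX") and the dump's name, with the driver's data making up the rest
 * of the descriptor (see struct vmcoredd_header in
 * include/uapi/linux/vmcore.h).
 */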

/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collection routine.
 * Write an ELF note at the beginning of the buffer to indicate a vmcore
 * device dump, and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep the buffer size page aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate a buffer for the driver to write its dump into */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
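
/*
 * Usage sketch (hypothetical driver; only struct vmcoredd_data and
 * vmcore_add_device_dump() are real interfaces here). The callback is
 * handed a zeroed buffer of at least data->size bytes to fill:
 *
 *	static int foo_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		memcpy(buf, foo_hw_state, data->size);	... hypothetical
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data foo_dump = {
 *		.dump_name = "FOO",
 *		.size = FOO_DUMP_SIZE,			... hypothetical
 *		.vmcoredd_callback = foo_collect,
 *	};
 *
 *	vmcore_add_device_dump(&foo_dump);
 */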
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!is_vmcore_usable())
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		elfcorehdr_free(elfcorehdr_addr);
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}