Lines matching refs: image (Linux kexec core, kernel/kexec_core.c)

104 static struct page *kimage_alloc_page(struct kimage *image,
108 int sanity_check_segment_list(struct kimage *image)
111 unsigned long nr_segments = image->nr_segments;
118 * the new image into invalid or reserved areas of RAM. This
131 mstart = image->segment[i].mem;
132 mend = mstart + image->segment[i].memsz;
150 mstart = image->segment[i].mem;
151 mend = mstart + image->segment[i].memsz;
155 pstart = image->segment[j].mem;
156 pend = pstart + image->segment[j].memsz;
169 if (image->segment[i].bufsz > image->segment[i].memsz)
179 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
182 total_pages += PAGE_COUNT(image->segment[i].memsz);
192 * attempt to load the new image into invalid or reserved
199 if (image->type == KEXEC_TYPE_CRASH) {
203 mstart = image->segment[i].mem;
204 mend = mstart + image->segment[i].memsz - 1;
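
The sanity_check_segment_list() hits above show its main checks: the i/j double loop (listing lines 150-156) rejects segments whose destination ranges overlap, line 169 rejects a bufsz larger than memsz, lines 179-182 cap the total number of destination pages, and the KEXEC_TYPE_CRASH branch (lines 199-204) additionally constrains crash segments. A minimal userspace sketch of the overlap test, with a hypothetical trimmed-down segment struct standing in for struct kexec_segment, might read:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct kexec_segment: only the two fields
     * the overlap test needs. */
    struct seg {
        unsigned long mem;      /* destination physical address */
        unsigned long memsz;    /* destination size in bytes */
    };

    /* Return true if any segment's [mem, mem + memsz) range intersects an
     * earlier segment, mirroring the mstart/mend vs. pstart/pend loop in
     * the listing (half-open ranges, so touching ranges do not overlap). */
    static bool segments_overlap(const struct seg *s, size_t nr_segments)
    {
        for (size_t i = 0; i < nr_segments; i++) {
            unsigned long mstart = s[i].mem;
            unsigned long mend = mstart + s[i].memsz;

            for (size_t j = 0; j < i; j++) {
                unsigned long pstart = s[j].mem;
                unsigned long pend = pstart + s[j].memsz;

                if (mend > pstart && mstart < pend)
                    return true;
            }
        }
        return false;
    }
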
218 struct kimage *image;
221 image = kzalloc(sizeof(*image), GFP_KERNEL);
222 if (!image)
225 image->head = 0;
226 image->entry = &image->head;
227 image->last_entry = &image->head;
228 image->control_page = ~0; /* By default this does not apply */
229 image->type = KEXEC_TYPE_DEFAULT;
232 INIT_LIST_HEAD(&image->control_pages);
235 INIT_LIST_HEAD(&image->dest_pages);
238 INIT_LIST_HEAD(&image->unusable_pages);
241 image->hp_action = KEXEC_CRASH_HP_NONE;
242 image->elfcorehdr_index = -1;
243 image->elfcorehdr_updated = false;
246 return image;
249 int kimage_is_destination_range(struct kimage *image,
255 for (i = 0; i < image->nr_segments; i++) {
258 mstart = image->segment[i].mem;
259 mend = mstart + image->segment[i].memsz - 1;
318 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
332 * At worst this runs in O(N) of the image size.
355 kimage_is_destination_range(image, addr, eaddr)) {
363 list_add(&pages->lru, &image->control_pages);
369 * to give it an entry in image->segment[].
375 * page allocations, and add everything to image->dest_pages.
385 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
414 hole_start = ALIGN(image->control_page, size);
424 for (i = 0; i < image->nr_segments; i++) {
427 mstart = image->segment[i].mem;
428 mend = mstart + image->segment[i].memsz - 1;
437 if (i == image->nr_segments) {
439 image->control_page = hole_end + 1;
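
kimage_alloc_crash_control_pages() does not allocate fresh memory: the crash kernel region is already reserved, so it only has to find a naturally aligned hole of the requested size inside that region which no segment claims, starting the search at image->control_page (line 414) and bumping the candidate hole past any segment it collides with (lines 424-439). A self-contained sketch of that hole search, assuming page-aligned segments and a power-of-two hole size, might be:

    #include <limits.h>
    #include <stddef.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    struct seg { unsigned long mem, memsz; };   /* as in the earlier sketch */

    /* Return the start of the lowest `size`-aligned, `size`-byte hole inside
     * [region_start, region_end] that overlaps no segment, or ULONG_MAX if
     * none fits.  `size` must be a power of two. */
    static unsigned long find_hole(const struct seg *s, size_t nr_segments,
                                   unsigned long region_start,
                                   unsigned long region_end,
                                   unsigned long size)
    {
        unsigned long hole_start = ALIGN_UP(region_start, size);
        unsigned long hole_end = hole_start + size - 1;

        while (hole_end <= region_end) {
            size_t i;

            for (i = 0; i < nr_segments; i++) {
                unsigned long mstart = s[i].mem;
                unsigned long mend = mstart + s[i].memsz - 1;

                if (hole_end >= mstart && hole_start <= mend) {
                    /* Collision: retry just past this segment. */
                    hole_start = ALIGN_UP(mend + 1, size);
                    hole_end = hole_start + size - 1;
                    break;
                }
            }
            if (i == nr_segments)
                return hole_start;      /* hole is free */
        }
        return ULONG_MAX;
    }
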
453 struct page *kimage_alloc_control_pages(struct kimage *image,
458 switch (image->type) {
460 pages = kimage_alloc_normal_control_pages(image, order);
464 pages = kimage_alloc_crash_control_pages(image, order);
472 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
474 if (*image->entry != 0)
475 image->entry++;
477 if (image->entry == image->last_entry) {
481 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
486 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
487 image->entry = ind_page;
488 image->last_entry = ind_page +
491 *image->entry = entry;
492 image->entry++;
493 *image->entry = 0;
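
kimage_add_entry() (listing lines 472-493) is what builds the kexec page list: each call appends one tagged word, and when the write cursor reaches the last slot of the current page it allocates a fresh indirection page, stores that page's address tagged IND_INDIRECTION in the last slot, and carries on writing into the new page, always keeping a 0 terminator after the newest entry. The hypothetical userspace model below mimics that growth, with aligned_alloc() in place of the kernel page allocator, illustrative flag values, and model_terminate() mirroring kimage_terminate() further down the listing:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE       4096UL
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    /* Illustrative tag values standing in for the kernel's IND_* flags. */
    #define IND_DESTINATION 0x1UL
    #define IND_INDIRECTION 0x2UL
    #define IND_DONE        0x4UL
    #define IND_SOURCE      0x8UL

    typedef unsigned long kimage_entry_t;

    struct kimage_model {
        kimage_entry_t head;            /* first entry, embedded in the struct */
        kimage_entry_t *entry;          /* next slot to fill */
        kimage_entry_t *last_entry;     /* last slot of the current page */
    };

    static void model_init(struct kimage_model *img)
    {
        img->head = 0;
        img->entry = &img->head;        /* as in do_kimage_alloc_init() */
        img->last_entry = &img->head;
    }

    static int model_add_entry(struct kimage_model *img, kimage_entry_t entry)
    {
        if (*img->entry != 0)
            img->entry++;

        if (img->entry == img->last_entry) {
            size_t nslots = PAGE_SIZE / sizeof(kimage_entry_t);
            kimage_entry_t *ind_page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

            if (!ind_page)
                return -1;
            memset(ind_page, 0, PAGE_SIZE);

            /* Chain the new page into the list, then keep filling it. */
            *img->entry = (kimage_entry_t)(uintptr_t)ind_page | IND_INDIRECTION;
            img->entry = ind_page;
            img->last_entry = ind_page + nslots - 1;
        }

        *img->entry = entry;
        img->entry++;
        *img->entry = 0;                /* list stays 0-terminated */
        return 0;
    }

    static void model_terminate(struct kimage_model *img)
    {
        if (*img->entry != 0)
            img->entry++;
        *img->entry = IND_DONE;         /* final marker, as in kimage_terminate() */
    }

A caller of this model would model_init(), append a destination entry followed by its source-page entries for each segment, then model_terminate() once everything is loaded.
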
498 static int kimage_set_destination(struct kimage *image,
503 return kimage_add_entry(image, destination | IND_DESTINATION);
507 static int kimage_add_page(struct kimage *image, unsigned long page)
511 return kimage_add_entry(image, page | IND_SOURCE);
515 static void kimage_free_extra_pages(struct kimage *image)
518 kimage_free_page_list(&image->dest_pages);
521 kimage_free_page_list(&image->unusable_pages);
525 void kimage_terminate(struct kimage *image)
527 if (*image->entry != 0)
528 image->entry++;
530 *image->entry = IND_DONE;
533 #define for_each_kimage_entry(image, ptr, entry) \
534 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
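
for_each_kimage_entry() (lines 533-534) is the matching reader: it starts at &image->head, stops on an IND_DONE entry, and whenever the current entry carries IND_INDIRECTION it follows the pointer to the next page instead of stepping to the adjacent slot; kimage_free() and kimage_dst_used() below are both built on it. Continuing the hypothetical model from the previous sketch (same types and flag values), the traversal looks like:

    /* Walk a list built by model_add_entry()/model_terminate() above and
     * hand every IND_SOURCE address to cb().  Masking with PAGE_MASK strips
     * the flag bits before the pointer is followed, much as the kernel macro
     * does with boot_phys_to_virt(entry & PAGE_MASK). */
    static void model_for_each_entry(struct kimage_model *img,
                                     void (*cb)(unsigned long addr))
    {
        kimage_entry_t *ptr = &img->head;
        kimage_entry_t entry;

        while ((entry = *ptr) != 0 && !(entry & IND_DONE)) {
            if (entry & IND_INDIRECTION) {
                /* Jump into the indirection page. */
                ptr = (kimage_entry_t *)(uintptr_t)(entry & PAGE_MASK);
            } else {
                if (entry & IND_SOURCE)
                    cb(entry & PAGE_MASK);
                ptr++;
            }
        }
    }

In the kernel, kimage_free() uses the same walk and additionally releases each indirection page itself once it has stepped past it.
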
546 void kimage_free(struct kimage *image)
551 if (!image)
555 if (image->vmcoreinfo_data_copy) {
557 vunmap(image->vmcoreinfo_data_copy);
561 kimage_free_extra_pages(image);
562 for_each_kimage_entry(image, ptr, entry) {
579 machine_kexec_cleanup(image);
582 kimage_free_page_list(&image->control_pages);
588 if (image->file_mode)
589 kimage_file_post_load_cleanup(image);
591 kfree(image);
594 static kimage_entry_t *kimage_dst_used(struct kimage *image,
600 for_each_kimage_entry(image, ptr, entry) {
613 static struct page *kimage_alloc_page(struct kimage *image,
642 list_for_each_entry(page, &image->dest_pages, lru) {
660 list_add(&page->lru, &image->unusable_pages);
670 if (!kimage_is_destination_range(image, addr,
679 old = kimage_dst_used(image, addr);
703 list_add(&page->lru, &image->dest_pages);
709 static int kimage_load_normal_segment(struct kimage *image,
718 if (image->file_mode)
726 result = kimage_set_destination(image, maddr);
735 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
740 result = kimage_add_page(image, page_to_boot_pfn(page)
755 if (image->file_mode)
760 if (image->file_mode)
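
The kimage_load_normal_segment() hits (lines 709-760) outline the copy loop: the destination is recorded with kimage_set_destination(), each destination page gets a freshly allocated source page registered via kimage_add_page(), and data is copied in page-sized chunks, from an in-kernel buffer when image->file_mode is set and from user memory otherwise, with anything past the supplied buffer zero-filled up to memsz. The chunk arithmetic can be sketched in userspace as follows (hypothetical names; dst_page() stands in for page allocation and mapping, memcpy for both copy paths):

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Copy `ubytes` bytes from `buf` toward destination address `maddr` and
     * zero-fill up to `mbytes`, handling one destination page per pass.
     * dst_page(maddr) must return a writable mapping of the page that will
     * back `maddr`. */
    static void load_segment_model(unsigned long maddr, size_t mbytes,
                                   const char *buf, size_t ubytes,
                                   char *(*dst_page)(unsigned long maddr))
    {
        while (mbytes) {
            size_t off = maddr & (PAGE_SIZE - 1);
            size_t mchunk = mbytes < PAGE_SIZE - off ? mbytes : PAGE_SIZE - off;
            size_t uchunk = ubytes < mchunk ? ubytes : mchunk;
            char *ptr = dst_page(maddr) + off;

            memset(ptr, 0, mchunk);           /* pad with zeroes, like clear_page() */
            if (uchunk)
                memcpy(ptr, buf, uchunk);     /* file-mode memcpy or copy_from_user() */

            buf += uchunk;
            ubytes -= uchunk;
            maddr += mchunk;
            mbytes -= mchunk;
        }
    }
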
780 static int kimage_load_crash_segment(struct kimage *image,
794 if (image->file_mode)
824 if (image->file_mode)
829 if (image->file_mode)
851 int kimage_load_segment(struct kimage *image,
856 switch (image->type) {
858 result = kimage_load_normal_segment(image, segment);
862 result = kimage_load_crash_segment(image, segment);