Lines Matching defs:map

45 static void	adj_cyl_offset(struct dk_map32 *map);
46 static int	check_map(struct dk_map32 *map);
47 static void	get_user_map(struct dk_map32 *map, int float_part);
48 static void	get_user_map_efi(struct dk_gpt *map, int float_part);
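
These four statics look like the helpers behind the partition-modify flow in format(1M) (the shape matches illumos/OpenSolaris usr/src/cmd/format/menu_partition.c): build a candidate map, let the user resize slices against a floating "free hog" slice (get_user_map, get_user_map_efi), repack the cylinder offsets (adj_cyl_offset), and sanity-check the result (check_map). As a reading aid, a minimal stand-in for the SMI-label slice type they all take; the field names match the excerpts below, but the exact widths are an assumption (the real definition is struct dk_map32 in <sys/dklabel.h>):

    #include <stdint.h>

    typedef uint32_t blkaddr32_t;       /* assumed width of a block address */

    struct dk_map32 {                   /* one slice of an SMI (VTOC) label */
            blkaddr32_t dkl_cylno;      /* starting cylinder */
            blkaddr32_t dkl_nblk;       /* length in blocks; 0 = unused */
    };
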
75 struct dk_map32 *map = tmp_pinfo->pinfo_map;
167 * Create partition map from existing map
171 map[i].dkl_nblk = cur_parts->pinfo_map[i].dkl_nblk;
172 map[i].dkl_cylno = cur_parts->pinfo_map[i].dkl_cylno;
176 * Make an empty partition map, with all the space
181 map[i].dkl_nblk = 0;
182 map[i].dkl_cylno = 0;
184 map[C_PARTITION].dkl_nblk = ncyl * spc();
190 map[I_PARTITION].dkl_nblk = spc();
191 map[I_PARTITION].dkl_cylno = 0;
193 map[J_PARTITION].dkl_nblk = 2 * spc();
194 map[J_PARTITION].dkl_cylno = spc() / spc();
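
Lines 176-194 build a map from scratch: every slice is zeroed, the whole-disk c slice gets all ncyl cylinders, and the x86-style boot slice (I_PARTITION) takes cylinder 0 while the alternates slice (J_PARTITION) takes two cylinders starting at cylinder 1; spc()/spc() at line 194 is just 1 written in cylinder units. A compilable sketch with toy geometry, assuming format's spc() is sectors per cylinder (nhead * nsect) and assuming the slice indices:

    #include <stdint.h>
    #include <stdio.h>

    #define NDKMAP       16             /* assumption: x86 label, 16 slices */
    #define C_PARTITION  2              /* whole-disk slice */
    #define I_PARTITION  8              /* boot slice */
    #define J_PARTITION  9              /* alternates slice */

    static int nhead = 16, nsect = 63, ncyl = 1000;    /* toy geometry */
    #define spc()   (nhead * nsect)     /* sectors per cylinder */

    struct dk_map32 { uint32_t dkl_cylno, dkl_nblk; };

    int
    main(void)
    {
            struct dk_map32 map[NDKMAP];
            int i;

            for (i = 0; i < NDKMAP; i++) {          /* start empty */
                    map[i].dkl_nblk = 0;
                    map[i].dkl_cylno = 0;
            }
            map[C_PARTITION].dkl_nblk = ncyl * spc();   /* c spans the disk */

            map[I_PARTITION].dkl_nblk = spc();          /* one cylinder */
            map[I_PARTITION].dkl_cylno = 0;             /* at cylinder 0 */
            map[J_PARTITION].dkl_nblk = 2 * spc();      /* two cylinders */
            map[J_PARTITION].dkl_cylno = 1;             /* spc()/spc() == 1 */

            printf("c holds %u blocks; boot at cyl %u, alternates at cyl %u\n",
                (unsigned)map[C_PARTITION].dkl_nblk,
                (unsigned)map[I_PARTITION].dkl_cylno,
                (unsigned)map[J_PARTITION].dkl_cylno);
            return (0);
    }
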
244 map[free_hog].dkl_nblk = map[C_PARTITION].dkl_nblk;
246 map[free_hog].dkl_nblk -= map[I_PARTITION].dkl_nblk;
248 map[free_hog].dkl_nblk -=
249 map[J_PARTITION].dkl_nblk;
258 if (map[free_hog].dkl_nblk == 0) {
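
Lines 244-258 seed the floating free-hog slice: it starts out owning everything in c, then gives back the blocks reserved for the boot (I) and alternates (J) slices; if nothing is left over, there is no space to redistribute. The same arithmetic with toy numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define C_PARTITION  2
    #define I_PARTITION  8
    #define J_PARTITION  9
    #define NDKMAP       16

    struct dk_map32 { uint32_t dkl_cylno, dkl_nblk; };

    int
    main(void)
    {
            struct dk_map32 map[NDKMAP] = {{0, 0}};
            int free_hog = 0;                   /* assumption: slice 0 floats */

            map[C_PARTITION].dkl_nblk = 1008000;    /* whole disk */
            map[I_PARTITION].dkl_nblk = 1008;       /* 1-cylinder boot */
            map[J_PARTITION].dkl_nblk = 2016;       /* 2-cylinder alternates */

            /* hog starts with everything, minus the reserved slices */
            map[free_hog].dkl_nblk = map[C_PARTITION].dkl_nblk;
            map[free_hog].dkl_nblk -= map[I_PARTITION].dkl_nblk;
            map[free_hog].dkl_nblk -= map[J_PARTITION].dkl_nblk;

            if (map[free_hog].dkl_nblk == 0)
                    printf("no space available for redistribution\n");
            else
                    printf("free hog holds %u blocks\n",
                        (unsigned)map[free_hog].dkl_nblk);
            return (0);
    }
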
296 get_user_map(map, free_hog);
301 adj_cyl_offset(map);
314 * Update new partition map
317 cur_parts->pinfo_map[i].dkl_nblk = map[i].dkl_nblk;
318 cur_parts->pinfo_map[i].dkl_cylno = map[i].dkl_cylno;
321 map[i].dkl_cylno * nhead * nsect;
323 map[i].dkl_nblk;
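
Lines 314-323 copy the accepted map back into the partition info; the fragments at 321 and 323 are the VTOC writeback, which converts a slice's cylinder offset into an absolute starting sector (p_start = dkl_cylno * nhead * nsect) and carries the block count over as p_size. A one-line check of that conversion:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned nhead = 16, nsect = 63;        /* toy geometry */
            unsigned dkl_cylno = 3;                 /* slice starts at cyl 3 */
            unsigned p_start = dkl_cylno * nhead * nsect;

            printf("p_start = %u sectors\n", p_start);      /* 3024 */
            return (0);
    }
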
349 adj_cyl_offset(map)
350 struct dk_map32 *map;
366 if (i != C_PARTITION && map[i].dkl_nblk) {
367 map[i].dkl_cylno = cyloffset;
368 cyloffset += (map[i].dkl_nblk + (spc()-1))/spc();
369 } else if (map[i].dkl_nblk == 0) {
370 map[i].dkl_cylno = 0;
379 if (i != C_PARTITION && map[i].dkl_nblk) {
380 map[i].dkl_cylno = cyloffset;
381 cyloffset += (map[i].dkl_nblk + (spc()-1))/spc();
382 } else if (map[i].dkl_nblk == 0) {
383 map[i].dkl_cylno = 0;
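
adj_cyl_offset() (349-383) repacks the non-empty slices: each slice except c is placed at the running cylinder offset and its size is rounded up to whole cylinders, while empty slices get offset 0. The identical loop bodies at 366-370 and 379-383 suggest the original splits the walk into two index ranges; a single-pass sketch of the idea:

    #include <stdint.h>
    #include <stdio.h>

    #define NDKMAP       8              /* toy: 8 slices */
    #define C_PARTITION  2
    static int nhead = 16, nsect = 63;
    #define spc()   (nhead * nsect)

    struct dk_map32 { uint32_t dkl_cylno, dkl_nblk; };

    static void
    adj_cyl_offset_sketch(struct dk_map32 *map)
    {
            uint32_t cyloffset = 0;
            int i;

            for (i = 0; i < NDKMAP; i++) {
                    if (i != C_PARTITION && map[i].dkl_nblk) {
                            map[i].dkl_cylno = cyloffset;
                            /* round the size up to whole cylinders */
                            cyloffset += (map[i].dkl_nblk + (spc() - 1)) / spc();
                    } else if (map[i].dkl_nblk == 0) {
                            map[i].dkl_cylno = 0;
                    }
            }
    }

    int
    main(void)
    {
            struct dk_map32 map[NDKMAP] = {{0, 0}};

            map[0].dkl_nblk = 100;          /* under a cylinder: rounds to 1 */
            map[1].dkl_nblk = 2 * spc();    /* exactly two cylinders */
            adj_cyl_offset_sketch(map);
            printf("slice 0 at cyl %u, slice 1 at cyl %u\n",
                (unsigned)map[0].dkl_cylno, (unsigned)map[1].dkl_cylno);
            return (0);
    }
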
393 check_map(map)
394 struct dk_map32 *map;
404 cyloffset = map[0].dkl_cylno;
405 tot_blks = map[0].dkl_nblk;
413 if (map[i].dkl_cylno > (blkaddr32_t)ncyl-1) {
416 (PARTITION_BASE+i), map[i].dkl_cylno);
419 if (map[i].dkl_nblk >
420 (blkaddr32_t)(ncyl - map[i].dkl_cylno) * spc()) {
423 (PARTITION_BASE+i), map[i].dkl_nblk);
426 if (i != C_PARTITION && map[i].dkl_nblk) {
431 if (map[i].dkl_cylno < cyloffset) {
435 } else if (map[i].dkl_cylno > cyloffset) {
439 cyloffset += (map[i].dkl_nblk + (spc()-1))/spc();
440 tot_blks = map[i].dkl_nblk;
443 if (tot_blks > map[C_PARTITION].dkl_nblk) {
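
check_map() (393-443) validates the candidate map: a slice must start on a cylinder inside the disk (413), must not run past the last cylinder (419-420), and a non-c slice must not start before the running offset (431, an overlap), while starting after it only draws a warning about unused space (435); finally tot_blks is checked against the c slice (443). The two hard range checks, distilled into a compilable predicate:

    #include <stdint.h>
    #include <stdio.h>

    static int nhead = 16, nsect = 63, ncyl = 1000;     /* toy geometry */
    #define spc()   (nhead * nsect)

    struct dk_map32 { uint32_t dkl_cylno, dkl_nblk; };

    static int
    slice_in_bounds(const struct dk_map32 *m)
    {
            if (m->dkl_cylno > (uint32_t)ncyl - 1)
                    return (0);         /* starts past the last cylinder */
            if (m->dkl_nblk > (uint32_t)(ncyl - m->dkl_cylno) * spc())
                    return (0);         /* runs off the end of the disk */
            return (1);
    }

    int
    main(void)
    {
            struct dk_map32 ok  = { 10, 5 * spc() };
            struct dk_map32 bad = { 999, 2 * spc() };   /* only 1 cyl left */

            printf("%d %d\n", slice_in_bounds(&ok), slice_in_bounds(&bad));
            return (0);
    }
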
458 get_user_map(map, float_part)
459 struct dk_map32 *map;
478 ioparam.io_bounds.upper = map[i].dkl_nblk +
479 map[float_part].dkl_nblk;
480 deflt = map[i].dkl_nblk;
492 map[float_part].dkl_nblk -= (newsize - map[i].dkl_nblk);
493 map[i].dkl_nblk = newsize;
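
get_user_map() (458-493) resizes one slice at a time: the prompt's upper bound is the slice's current size plus whatever the floating slice still holds (478-480), and once the user picks a new size the floating slice absorbs the difference (492-493), so the total stays constant. A sketch of that invariant; resize_slice() is a hypothetical name, not in the original:

    #include <stdint.h>
    #include <stdio.h>

    struct dk_map32 { uint32_t dkl_cylno, dkl_nblk; };

    /* hypothetical helper: grow/shrink slice i at the hog's expense */
    static void
    resize_slice(struct dk_map32 *map, int i, int float_part, uint32_t newsize)
    {
            /*
             * Caller bounds newsize by map[i].dkl_nblk +
             * map[float_part].dkl_nblk; unsigned wraparound
             * makes the same line work for shrinking, too.
             */
            map[float_part].dkl_nblk -= (newsize - map[i].dkl_nblk);
            map[i].dkl_nblk = newsize;
    }

    int
    main(void)
    {
            struct dk_map32 map[2] = { { 0, 100 }, { 0, 900 } };

            resize_slice(map, 0, 1, 400);       /* slice 0 grows by 300 */
            printf("slice 0: %u, free hog: %u\n",
                (unsigned)map[0].dkl_nblk, (unsigned)map[1].dkl_nblk);
            /* prints "slice 0: 400, free hog: 600"; total is still 1000 */
            return (0);
    }
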
552 get_user_map_efi(map, float_part)
553 struct dk_gpt *map;
564 for (i = 0; i < map->efi_nparts - 1; i++) {
569 ioparam.io_bounds.upper = map->efi_last_u_lba;
571 efi_deflt.end_sector = map->efi_parts[i].p_size;
577 map->efi_parts[i].p_tag = V_UNASSIGNED;
578 } else if ((i64 != 0) && (map->efi_parts[i].p_tag ==
580 map->efi_parts[i].p_tag = V_USR;
583 map->efi_parts[i].p_start = 0;
585 map->efi_parts[i].p_start = start_lba;
587 map->efi_parts[i].p_size = i64;
591 map->efi_parts[float_part].p_start = start_lba;
592 map->efi_parts[float_part].p_size = map->efi_last_u_lba -
594 map->efi_parts[float_part].p_tag = V_USR;
595 if (map->efi_parts[float_part].p_size == UINT_MAX64) {
596 map->efi_parts[float_part].p_size = 0;
597 map->efi_parts[float_part].p_start = 0;
598 map->efi_parts[float_part].p_tag = V_UNASSIGNED;
602 for (i = 0; i < map->efi_nparts; i++) {
603 if (map->efi_parts[i].p_tag == V_RESERVED) {
604 map->efi_parts[i].p_start = map->efi_last_u_lba -
606 map->efi_parts[i].p_size = (1024 * 16);
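
get_user_map_efi() (552-606) is the EFI/GPT counterpart and works in absolute LBAs rather than cylinders: a slice resized to zero is untagged (577), a slice growing from zero while unassigned becomes V_USR (578-580), the floating partition takes whatever remains up to efi_last_u_lba (591-598), and any V_RESERVED slice is pinned to the last 16384 sectors (1024 * 16) of the usable range (602-606). The reserved-slice placement with a toy struct; the real types are dk_gpt and dk_part from <sys/efi_partition.h>, and the tag value here is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define V_RESERVED  0x0b            /* assumption: tag from <sys/vtoc.h> */

    struct dk_part_sketch { uint64_t p_start, p_size; int p_tag; };

    int
    main(void)
    {
            uint64_t efi_last_u_lba = 2097118;  /* toy ~1 GiB disk */
            struct dk_part_sketch rsvd;

            rsvd.p_size  = 1024 * 16;           /* 16384 sectors, as above */
            rsvd.p_start = efi_last_u_lba - rsvd.p_size;
            rsvd.p_tag   = V_RESERVED;

            printf("reserved slice: start %llu, size %llu\n",
                (unsigned long long)rsvd.p_start,
                (unsigned long long)rsvd.p_size);
            return (0);
    }
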