Lines Matching refs:dev_desc

46 static int is_gpt_valid(struct blk_desc *dev_desc, u64 lba,
48 static gpt_entry *alloc_read_gpt_entries(struct blk_desc *dev_desc,
197 int get_disk_guid(struct blk_desc * dev_desc, char *guid) in get_disk_guid() argument
199 ALLOC_CACHE_ALIGN_BUFFER_PAD(gpt_header, gpt_head, 1, dev_desc->rawblksz); in get_disk_guid()
204 if (is_gpt_valid(dev_desc, GPT_PRIMARY_PARTITION_TABLE_LBA, in get_disk_guid()
207 if (is_gpt_valid(dev_desc, dev_desc->rawlba - 1, in get_disk_guid()
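The get_disk_guid() group shows the pattern used throughout this file: validate the primary GPT header at LBA 1 first, then fall back to the backup header at rawlba - 1. A minimal caller sketch, assuming the U-Boot build environment (part.h, uuid.h, stdio.h); print_disk_guid() is an illustrative helper, not part of the listed file:

#include <part.h>
#include <stdio.h>
#include <uuid.h>

static void print_disk_guid(struct blk_desc *dev_desc)
{
	char guid[UUID_STR_LEN + 1];

	/* Tries the primary header first, then the backup copy. */
	if (!get_disk_guid(dev_desc, guid))
		printf("disk GUID: %s\n", guid);
	else
		printf("no valid GPT found\n");
}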
224 void part_print_efi(struct blk_desc *dev_desc) in part_print_efi() argument
226 ALLOC_CACHE_ALIGN_BUFFER_PAD(gpt_header, gpt_head, 1, dev_desc->rawblksz); in part_print_efi()
234 if (is_gpt_valid(dev_desc, GPT_PRIMARY_PARTITION_TABLE_LBA, in part_print_efi()
237 if (is_gpt_valid(dev_desc, (dev_desc->rawlba - 1), in part_print_efi()
255 sector = dev_desc->rawblksz / dev_desc->blksz; in part_print_efi()
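part_print_efi() and several later matches compute the same scale factor, sector = rawblksz / blksz: GPT LBAs are kept in raw-block units while blk_dread()/blk_dwrite() take logical blocks, so every raw LBA is multiplied by that factor. A standalone sketch with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned long blksz = 512;	/* block size the blk layer uses */
	unsigned long rawblksz = 4096;	/* block size the GPT was written with */
	unsigned long sector = rawblksz / blksz;	/* = 8 */
	unsigned long gpt_lba = 1;	/* primary GPT header, raw units */

	printf("read %lu logical blocks starting at block %lu\n",
	       sector, gpt_lba * sector);
	return 0;
}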
283 int part_get_info_efi(struct blk_desc *dev_desc, int part, in part_get_info_efi() argument
290 if (!dev_desc->rawblksz || !dev_desc->rawlba) { in part_get_info_efi()
291 dev_desc->rawblksz = dev_desc->blksz; in part_get_info_efi()
292 dev_desc->rawlba = dev_desc->lba; in part_get_info_efi()
295 if (dev_desc->rawblksz == 4096) in part_get_info_efi()
299 gpt_head = memalign(ARCH_DMA_MINALIGN, dev_desc->rawblksz); in part_get_info_efi()
305 if (gpt_head && (gpt_head->last_usable_lba + b_gpt_nsec) != dev_desc->rawlba) { in part_get_info_efi()
306 if (dev_desc->rawblksz == 4096) { in part_get_info_efi()
309 gpt_head = memalign(ARCH_DMA_MINALIGN, dev_desc->rawblksz); in part_get_info_efi()
323 if (is_gpt_valid(dev_desc, GPT_PRIMARY_PARTITION_TABLE_LBA, in part_get_info_efi()
326 if (is_gpt_valid(dev_desc, (dev_desc->rawlba - 1), in part_get_info_efi()
344 sector = dev_desc->rawblksz / dev_desc->blksz; in part_get_info_efi()
353 info->blksz = dev_desc->blksz; in part_get_info_efi()
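part_get_info_efi() (like part_test_efi() and write_mbr_and_gpt_partitions() below) starts by defaulting the raw geometry when the block driver never filled it in, so the scale factor above degenerates to 1 on ordinary devices. A standalone sketch of the pattern; fake_desc is an illustrative stand-in for struct blk_desc:

#include <stdio.h>

struct fake_desc {
	unsigned long blksz, rawblksz;
	unsigned long long lba, rawlba;
};

int main(void)
{
	struct fake_desc d = { .blksz = 512, .lba = 0x1000000 };

	if (!d.rawblksz || !d.rawlba) {	/* driver left these at zero */
		d.rawblksz = d.blksz;
		d.rawlba = d.lba;
	}
	printf("sector scale = %lu\n", d.rawblksz / d.blksz);	/* 1 */
	return 0;
}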
376 static void gpt_entry_modify(struct blk_desc *dev_desc, in gpt_entry_modify() argument
387 if (dev_desc->rawblksz == 4096) { in gpt_entry_modify()
388 if (gpt_pte[i - 1].ending_lba <= (dev_desc->rawlba - 6)) in gpt_entry_modify()
390 gpt_pte[i - 1].ending_lba = dev_desc->rawlba - 6; in gpt_entry_modify()
392 if (gpt_pte[i - 1].ending_lba <= (dev_desc->rawlba - 0x22)) in gpt_entry_modify()
395 gpt_pte[i - 1].ending_lba = dev_desc->rawlba - 0x41; in gpt_entry_modify()
403 static int part_efi_repair(struct blk_desc *dev_desc, gpt_entry *gpt_pte, in part_efi_repair() argument
410 int sector = dev_desc->rawblksz / dev_desc->blksz; in part_efi_repair()
418 gpt_head->my_lba = dev_desc->rawlba - 1; in part_efi_repair()
421 gpt_head->partition_entry_lba = dev_desc->rawlba - 5; in part_efi_repair()
422 gpt_head->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 6); in part_efi_repair()
424 gpt_head->partition_entry_lba = dev_desc->rawlba - 0x21; in part_efi_repair()
425 gpt_head->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 34); in part_efi_repair()
427 gpt_entry_modify(dev_desc, gpt_pte, gpt_head); in part_efi_repair()
431 blk = le64_to_cpu(dev_desc->rawlba - 1); in part_efi_repair()
432 if (blk_dwrite(dev_desc, blk * sector, sector, gpt_head) != sector) { in part_efi_repair()
439 blk_cnt = BLOCK_CNT(count, dev_desc); in part_efi_repair()
440 if (blk_dwrite(dev_desc, blk * sector, (lbaint_t)blk_cnt, gpt_pte) != in part_efi_repair()
449 gpt_head->alternate_lba = dev_desc->rawlba - 1; in part_efi_repair()
452 gpt_head->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 6); in part_efi_repair()
454 gpt_head->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 34); in part_efi_repair()
456 gpt_entry_modify(dev_desc, gpt_pte, gpt_head); in part_efi_repair()
460 if (blk_dwrite(dev_desc, 1 * sector, sector, gpt_head) != sector) { in part_efi_repair()
467 if (dev_desc->if_type == IF_TYPE_MTD && in part_efi_repair()
468 (dev_desc->devnum == BLK_MTD_NAND || dev_desc->devnum == BLK_MTD_SPI_NAND)) { in part_efi_repair()
469 blk_derase(dev_desc, 0, sector); in part_efi_repair()
477 blk_cnt = BLOCK_CNT(count, dev_desc); in part_efi_repair()
478 if (blk_dwrite(dev_desc, blk * sector, (lbaint_t)blk_cnt, gpt_pte) != in part_efi_repair()
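The constants in part_efi_repair() encode the standard backup-GPT layout for 128 partition entries of 128 bytes each (16 KiB): 4 raw blocks of entries at 4096-byte blocks, 32 at 512-byte blocks, packed directly below the backup header in the last block. A standalone sketch of that arithmetic; the disk sizes are illustrative:

#include <stdio.h>

static void backup_layout(unsigned long long rawlba, unsigned rawblksz)
{
	unsigned long long my_lba = rawlba - 1;		/* backup header */
	unsigned long long entry_lba, last_usable;

	if (rawblksz == 4096) {
		entry_lba = rawlba - 5;		/* 16 KiB = 4 raw blocks */
		last_usable = rawlba - 6;
	} else {
		entry_lba = rawlba - 0x21;	/* 16 KiB = 32 raw blocks */
		last_usable = rawlba - 34;
	}
	printf("hdr %llu, entries %llu, last usable %llu\n",
	       my_lba, entry_lba, last_usable);
}

int main(void)
{
	backup_layout(0x1d5a000ULL, 4096);
	backup_layout(0xe8e088b0ULL, 512);
	return 0;
}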
491 static int part_test_efi(struct blk_desc *dev_desc) in part_test_efi() argument
495 if (!dev_desc->rawblksz || !dev_desc->rawlba) { in part_test_efi()
496 dev_desc->rawblksz = dev_desc->blksz; in part_test_efi()
497 dev_desc->rawlba = dev_desc->lba; in part_test_efi()
500 ALLOC_CACHE_ALIGN_BUFFER_PAD(legacy_mbr, legacymbr, 1, dev_desc->rawblksz); in part_test_efi()
503 if ((blk_dread(dev_desc, 0, 1, (ulong *)legacymbr) != 1) in part_test_efi()
517 h_gpt_head = memalign(ARCH_DMA_MINALIGN, dev_desc->rawblksz); in part_test_efi()
519 b_gpt_head = memalign(ARCH_DMA_MINALIGN, dev_desc->rawblksz); in part_test_efi()
521 head_gpt_valid = is_gpt_valid(dev_desc, GPT_PRIMARY_PARTITION_TABLE_LBA, in part_test_efi()
523 backup_gpt_valid = is_gpt_valid(dev_desc, (dev_desc->rawlba - 1), in part_test_efi()
529 if (part_efi_repair(dev_desc, h_gpt_pte, h_gpt_head, in part_test_efi()
537 if (part_efi_repair(dev_desc, h_gpt_pte, h_gpt_head, in part_test_efi()
541 if (part_efi_repair(dev_desc, b_gpt_pte, b_gpt_head, in part_test_efi()
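part_test_efi() validates both GPT copies and rewrites whichever one failed from the survivor. The trailing arguments of part_efi_repair() are cut off in the listing, so this standalone sketch shows only the decision flow, with repair_from() as an illustrative stand-in:

#include <stdio.h>

static void repair_from(const char *survivor, const char *victim)
{
	printf("rewriting %s GPT from %s copy\n", victim, survivor);
}

int main(void)
{
	int head_gpt_valid = 1, backup_gpt_valid = 0;	/* illustrative */

	if (head_gpt_valid && backup_gpt_valid)
		printf("both copies intact\n");
	else if (head_gpt_valid)
		repair_from("primary", "backup");
	else if (backup_gpt_valid)
		repair_from("backup", "primary");
	else
		printf("no valid GPT, nothing to repair from\n");
	return 0;
}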
567 static int set_protective_mbr(struct blk_desc *dev_desc) in set_protective_mbr() argument
570 ALLOC_CACHE_ALIGN_BUFFER_PAD(legacy_mbr, p_mbr, 1, dev_desc->rawblksz); in set_protective_mbr()
571 memset(p_mbr, 0, dev_desc->rawblksz); in set_protective_mbr()
579 if (blk_dread(dev_desc, 0, 1, p_mbr) != 1) { in set_protective_mbr()
580 pr_err("** Can't read from device %d **\n", dev_desc->devnum); in set_protective_mbr()
588 p_mbr->partition_record[0].nr_sects = (u32) dev_desc->rawlba - 1; in set_protective_mbr()
591 if (blk_dwrite(dev_desc, 0, 1, p_mbr) != 1) { in set_protective_mbr()
593 dev_desc->devnum); in set_protective_mbr()
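set_protective_mbr() stamps a single type-0xEE record spanning LBA 1 to the end of the disk so that legacy tools treat the whole device as taken. A standalone sketch of the record fields; the disk size is illustrative, and the cast mirrors the listed (u32) truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rawlba = 0xe8e088b0ULL;	/* illustrative disk size */
	uint8_t sys_ind = 0xee;			/* protective EFI type */
	uint32_t start_sect = 1;
	uint32_t nr_sects = (uint32_t)rawlba - 1;	/* truncating cast */

	printf("type 0x%02x, start %u, sectors %u\n",
	       sys_ind, start_sect, nr_sects);
	return 0;
}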
600 int write_gpt_table(struct blk_desc *dev_desc, in write_gpt_table() argument
604 * sizeof(gpt_entry)), dev_desc); in write_gpt_table()
607 sector = dev_desc->rawblksz / dev_desc->blksz; in write_gpt_table()
609 debug("max lba: %x\n", (u32) dev_desc->rawlba); in write_gpt_table()
611 if (set_protective_mbr(dev_desc) < 0) in write_gpt_table()
625 if (blk_dwrite(dev_desc, 1 * sector, sector, gpt_h) != sector) in write_gpt_table()
628 if (blk_dwrite(dev_desc, le64_to_cpu(gpt_h->partition_entry_lba * sector), in write_gpt_table()
634 if (blk_dwrite(dev_desc, (lbaint_t)(le64_to_cpu(gpt_h->last_usable_lba) in write_gpt_table()
638 if (blk_dwrite(dev_desc, (lbaint_t)le64_to_cpu(gpt_h->my_lba) * sector, 1, in write_gpt_table()
646 printf("** Can't write to device %d **\n", dev_desc->devnum); in write_gpt_table()
650 int gpt_fill_pte(struct blk_desc *dev_desc, in gpt_fill_pte() argument
673 dev_desc->rawblksz; in gpt_fill_pte()
770 static uint32_t partition_entries_offset(struct blk_desc *dev_desc) in partition_entries_offset() argument
785 offset_bytes = PAD_SIZE(CONFIG_EFI_PARTITION_ENTRIES_OFF, dev_desc->rawblksz); in partition_entries_offset()
786 offset_blks = offset_bytes / dev_desc->rawblksz; in partition_entries_offset()
799 offset_bytes = PAD_SIZE(config_offset, dev_desc->rawblksz); in partition_entries_offset()
800 offset_blks = offset_bytes / dev_desc->rawblksz; in partition_entries_offset()
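partition_entries_offset() rounds the configured byte offset up to a whole raw block before converting it to a block count. The macros below mirror U-Boot's PAD_COUNT()/PAD_SIZE(); the offset value is illustrative:

#include <stdio.h>

#define PAD_COUNT(s, pad)	(((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad)	(PAD_COUNT(s, pad) * (pad))

int main(void)
{
	unsigned long rawblksz = 4096;
	unsigned long config_offset = 17408;	/* bytes, illustrative */
	unsigned long offset_bytes = PAD_SIZE(config_offset, rawblksz);
	unsigned long offset_blks = offset_bytes / rawblksz;

	printf("%lu bytes -> %lu bytes, %lu raw blocks\n",
	       config_offset, offset_bytes, offset_blks);	/* 20480, 5 */
	return 0;
}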
816 int gpt_fill_header(struct blk_desc *dev_desc, gpt_header *gpt_h, in gpt_fill_header() argument
823 gpt_h->alternate_lba = cpu_to_le64(dev_desc->rawlba - 1); in gpt_fill_header()
825 cpu_to_le64(partition_entries_offset(dev_desc)); in gpt_fill_header()
826 if (dev_desc->rawblksz == 4096) { in gpt_fill_header()
827 gpt_h->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 6); in gpt_fill_header()
831 gpt_h->last_usable_lba = cpu_to_le64(dev_desc->rawlba - 34); in gpt_fill_header()
846 int gpt_restore(struct blk_desc *dev_desc, char *str_disk_guid, in gpt_restore() argument
853 size = PAD_SIZE(sizeof(gpt_header), dev_desc->rawblksz); in gpt_restore()
861 size = PAD_SIZE(GPT_ENTRY_NUMBERS * sizeof(gpt_entry), dev_desc->rawblksz); in gpt_restore()
871 ret = gpt_fill_header(dev_desc, gpt_h, str_disk_guid, parts_count); in gpt_restore()
876 ret = gpt_fill_pte(dev_desc, gpt_h, gpt_e, partitions, parts_count); in gpt_restore()
881 ret = write_gpt_table(dev_desc, gpt_h, gpt_e); in gpt_restore()
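gpt_restore() is the top-level rebuild path: it pads and allocates the header and entry array, fills both, and hands them to write_gpt_table(). A hedged caller sketch, assuming the U-Boot build environment; the partition geometry and GUID string are purely illustrative:

#include <part.h>

static int make_gpt(struct blk_desc *dev_desc)
{
	struct disk_partition partitions[] = {
		{ .name = "boot", .start = 0x4000, .size = 0x20000 },
	};
	char guid[] = "12345678-1234-1234-1234-123456789abc";

	/* Fills header and PTEs, then writes MBR plus both GPT copies. */
	return gpt_restore(dev_desc, guid, partitions, 1);
}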
912 int gpt_verify_headers(struct blk_desc *dev_desc, gpt_header *gpt_head, in gpt_verify_headers() argument
919 if (is_gpt_valid(dev_desc, in gpt_verify_headers()
926 if (is_gpt_valid(dev_desc, (dev_desc->rawlba - 1), in gpt_verify_headers()
936 int gpt_verify_partitions(struct blk_desc *dev_desc, in gpt_verify_partitions() argument
945 ret = gpt_verify_headers(dev_desc, gpt_head, gpt_pte); in gpt_verify_partitions()
1014 int is_valid_gpt_buf(struct blk_desc *dev_desc, void *buf) in is_valid_gpt_buf() argument
1021 dev_desc->rawblksz); in is_valid_gpt_buf()
1024 != cpu_to_le64(dev_desc->rawlba) && in is_valid_gpt_buf()
1032 dev_desc->rawlba)) in is_valid_gpt_buf()
1037 dev_desc->rawblksz); in is_valid_gpt_buf()
1044 int write_mbr_and_gpt_partitions(struct blk_desc *dev_desc, void *buf) in write_mbr_and_gpt_partitions() argument
1052 if (!dev_desc->rawblksz || !dev_desc->rawlba) { in write_mbr_and_gpt_partitions()
1053 dev_desc->rawblksz = dev_desc->blksz; in write_mbr_and_gpt_partitions()
1054 dev_desc->rawlba = dev_desc->lba; in write_mbr_and_gpt_partitions()
1057 if (is_valid_gpt_buf(dev_desc, buf)) in write_mbr_and_gpt_partitions()
1062 dev_desc->rawblksz); in write_mbr_and_gpt_partitions()
1066 dev_desc->rawblksz); in write_mbr_and_gpt_partitions()
1069 dev_desc); in write_mbr_and_gpt_partitions()
1074 if (blk_dwrite(dev_desc, lba, cnt, buf) != cnt) { in write_mbr_and_gpt_partitions()
1083 if (blk_dwrite(dev_desc, lba, cnt, gpt_h) != cnt) { in write_mbr_and_gpt_partitions()
1091 if (blk_dwrite(dev_desc, lba, cnt, gpt_e) != cnt) { in write_mbr_and_gpt_partitions()
1102 if (blk_dwrite(dev_desc, lba, cnt, gpt_e) != cnt) { in write_mbr_and_gpt_partitions()
1110 if (blk_dwrite(dev_desc, lba, cnt, gpt_h) != cnt) { in write_mbr_and_gpt_partitions()
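write_mbr_and_gpt_partitions() consumes a flat image: the buffer is a byte-for-byte prefix of the disk, so each structure sits at its raw LBA times rawblksz. A standalone sketch of the offsets, assuming that layout; the entry LBA would really come from the header inside the buffer:

#include <stdio.h>

int main(void)
{
	unsigned long rawblksz = 4096;		/* illustrative */
	unsigned long long entry_lba = 2;	/* normally read from header */

	printf("MBR at byte 0\n");
	printf("primary GPT header at byte %lu\n", 1 * rawblksz);
	printf("partition entries at byte %llu\n", entry_lba * rawblksz);
	return 0;
}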
1183 static int is_gpt_valid(struct blk_desc *dev_desc, u64 lba, in is_gpt_valid() argument
1188 if (!dev_desc || !pgpt_head) { in is_gpt_valid()
1197 ALLOC_CACHE_ALIGN_BUFFER_PAD(legacy_mbr, mbr, 1, dev_desc->rawblksz); in is_gpt_valid()
1199 sector = dev_desc->rawblksz / dev_desc->blksz; in is_gpt_valid()
1201 if (blk_dread(dev_desc, 0, sector, (ulong *)mbr) != sector) { in is_gpt_valid()
1207 if (blk_dread(dev_desc, (lbaint_t)lba * sector, sector, pgpt_head) != sector) { in is_gpt_valid()
1212 if (validate_gpt_header(pgpt_head, (lbaint_t)lba, dev_desc->rawlba)) in is_gpt_valid()
1215 if (dev_desc->sig_type == SIG_TYPE_NONE) { in is_gpt_valid()
1218 dev_desc->sig_type = SIG_TYPE_GUID; in is_gpt_valid()
1219 memcpy(&dev_desc->guid_sig, &pgpt_head->disk_guid, in is_gpt_valid()
1222 dev_desc->sig_type = SIG_TYPE_MBR; in is_gpt_valid()
1223 dev_desc->mbr_sig = mbr->unique_mbr_signature; in is_gpt_valid()
1228 *pgpt_pte = alloc_read_gpt_entries(dev_desc, pgpt_head); in is_gpt_valid()
1253 static gpt_entry *alloc_read_gpt_entries(struct blk_desc *dev_desc, in alloc_read_gpt_entries() argument
1260 if (!dev_desc || !pgpt_head) { in alloc_read_gpt_entries()
1276 PAD_SIZE(count, dev_desc->rawblksz)); in alloc_read_gpt_entries()
1287 blk_cnt = BLOCK_CNT(count, dev_desc); in alloc_read_gpt_entries()
1288 if (blk_dread(dev_desc, blk * dev_desc->rawblksz / dev_desc->blksz, (lbaint_t)blk_cnt, pte) != blk_cnt) { in alloc_read_gpt_entries()
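alloc_read_gpt_entries() pads the entry-array size to whole raw blocks for the allocation, then converts it to a block count for the read. A standalone sketch with U-Boot-style macros, assuming BLOCK_CNT() counts logical blocks as in mainline; the geometry is illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PAD_COUNT(s, pad)	(((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad)	(PAD_COUNT(s, pad) * (pad))

int main(void)
{
	unsigned long blksz = 512, rawblksz = 4096;
	unsigned long count = 128 * 128;	/* 128 entries * 128 bytes */
	unsigned long alloc = PAD_SIZE(count, rawblksz);
	unsigned long blk_cnt = PAD_COUNT(count, blksz);	/* BLOCK_CNT() */
	void *pte = calloc(1, alloc);

	printf("alloc %lu bytes, read %lu logical blocks\n", alloc, blk_cnt);
	free(pte);
	return 0;
}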