mirror of https://github.com/bkerler/edl.git, synced 2024-11-14 19:14:58 -05:00

Merge pull request #515 from bongbui321/fix_setactive

Fix `cmd_setactiveslot()`

Commit adb2bc78e6
2 changed files with 131 additions and 64 deletions
edlclient/Library/firehose.py

@@ -19,7 +19,8 @@ from threading import Thread
 
 from edlclient.Library.Modules.nothing import nothing
 from edlclient.Library.utils import *
-from edlclient.Library.gpt import gpt
+from edlclient.Library.gpt import gpt, AB_FLAG_OFFSET, AB_PARTITION_ATTR_SLOT_ACTIVE, MAX_PRIORITY, PART_ATT_PRIORITY_BIT
+from edlclient.Library.gpt import PART_ATT_PRIORITY_VAL, PART_ATT_ACTIVE_VAL, PART_ATT_MAX_RETRY_COUNT_VAL, PART_ATT_SUCCESSFUL_VAL, PART_ATT_UNBOOTABLE_VAL
 from edlclient.Library.sparse import QCSparse
 from edlclient.Library.utils import progress
 from queue import Queue
@@ -212,7 +213,13 @@ class firehose(metaclass=LogBase):
         self.nandparttbl = None
         self.nandpart = nand_partition(parent=self, printer=print)
 
-    def detect_partition(self, arguments, partitionname):
+    def detect_partition(self, arguments, partitionname, send_full=False):
+        if arguments is None:
+            arguments = {
+                "--gpt-num-part-entries" : 0,
+                "--gpt-part-entry-size" : 0,
+                "--gpt-part-entry-start-lba" : 0
+            }
         fpartitions = {}
         for lun in self.luns:
             lunname = "Lun" + str(lun)
@@ -224,6 +231,9 @@ class firehose(metaclass=LogBase):
                 break
             else:
                 if partitionname in guid_gpt.partentries:
+                    if send_full:
+                        return [True, lun, data, guid_gpt]
+                    else:
                         return [True, lun, guid_gpt.partentries[partitionname]]
                 for part in guid_gpt.partentries:
                     fpartitions[lunname].append(part)
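Aside (not part of the commit): a minimal sketch of how the extended detect_partition return value is consumed; it mirrors the call made later in cmd_setactiveslot. Treating `fh` as an already-connected firehose instance is an assumption of the example.

    # fh is assumed to be a connected firehose instance (illustration only)
    resp = fh.detect_partition(arguments=None, partitionname="boot_b", send_full=True)
    if not resp[0]:
        raise RuntimeError("Cannot find partition boot_b")
    # with send_full=True the result also carries the raw GPT bytes and the parsed gpt object
    _, lun, gpt_data, guid_gpt = resp
    entry = guid_gpt.partentries["boot_b"]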
@@ -1303,59 +1313,128 @@ class firehose(metaclass=LogBase):
         return None
 
     def cmd_setactiveslot(self, slot: str):
-        def cmd_patch_multiple(lun, start_sector_patch, byte_offset_patch, headeroffset, pdata, header):
+        def cmd_patch_multiple(lun, start_sector, byte_offset, patch_data):
             offset = 0
-            header_size = len(header)
             size_each_patch = 4
-            write_size = len(pdata)
+            write_size = len(patch_data)
             for i in range(0, write_size, size_each_patch):
-                pdata_subset = int(unpack("<I", pdata[offset:offset+size_each_patch])[0])
-                self.cmd_patch( lun, start_sector_patch, \
-                            byte_offset_patch + offset, \
+                pdata_subset = int(unpack("<I", patch_data[offset:offset+size_each_patch])[0])
+                self.cmd_patch( lun, start_sector, \
+                            byte_offset + offset, \
                             pdata_subset, \
                             size_each_patch, True)
-                if i < header_size:
-                    header_subset = int(unpack("<I", header[offset:offset+size_each_patch])[0])
-                    self.cmd_patch( lun, headeroffset, \
-                                offset, \
-                                header_subset, \
-                                size_each_patch, True)
                 offset += size_each_patch
             return True
 
+        def set_flags(flags, active, is_boot):
+            new_flags = flags
+            if active:
+                if is_boot:
+                    new_flags |= (PART_ATT_PRIORITY_VAL | PART_ATT_ACTIVE_VAL | PART_ATT_MAX_RETRY_COUNT_VAL)
+                    new_flags &= (~PART_ATT_SUCCESSFUL_VAL & ~PART_ATT_UNBOOTABLE_VAL)
+                else:
+                    new_flags |= AB_PARTITION_ATTR_SLOT_ACTIVE << (AB_FLAG_OFFSET*8)
+            else:
+                if is_boot:
+                    new_flags &= (~PART_ATT_PRIORITY_VAL & ~PART_ATT_ACTIVE_VAL)
+                    new_flags |= ((MAX_PRIORITY-1) << PART_ATT_PRIORITY_BIT)
+                else:
+                    new_flags &= ~(AB_PARTITION_ATTR_SLOT_ACTIVE << (AB_FLAG_OFFSET*8))
+            return new_flags
+
+        def patch_helper(header_data_a, header_data_b, guid_gpt, partition_a, partition_b, slot_a_status, slot_b_status, is_boot):
+            part_entry_size = guid_gpt.header.part_entry_size
+
+            rf_a = BytesIO(header_data_a)
+            rf_b = BytesIO(header_data_b)
+
+            rf_a.seek(partition_a.entryoffset)
+            rf_b.seek(partition_b.entryoffset)
+
+            sdata_a = rf_a.read(part_entry_size)
+            sdata_b = rf_b.read(part_entry_size)
+
+            partentry_a = gpt.gpt_partition(sdata_a)
+            partentry_b = gpt.gpt_partition(sdata_b)
+
+            partentry_a.flags = set_flags(partentry_a.flags, slot_a_status, is_boot)
+            partentry_b.flags = set_flags(partentry_b.flags, slot_b_status, is_boot)
+            partentry_a.type, partentry_b.type = partentry_b.type, partentry_a.type
+
+            pdata_a, pdata_b = partentry_a.create(), partentry_b.create()
+            return pdata_a, partition_a.entryoffset, pdata_b, partition_b.entryoffset
+
         if slot.lower() not in ["a", "b"]:
             self.error("Only slots a or b are accepted. Aborting.")
             return False
-        partslots = {}
+        slot_a_status = None
         if slot == "a":
-            partslots["_a"] = True
-            partslots["_b"] = False
+            slot_a_status = True
         elif slot == "b":
-            partslots["_a"] = False
-            partslots["_b"] = True
+            slot_a_status = False
+        slot_b_status = not slot_a_status
         fpartitions = {}
         try:
-            for lun in self.luns:
-                lunname = "Lun" + str(lun)
+            for lun_a in self.luns:
+                lunname = "Lun" + str(lun_a)
                 fpartitions[lunname] = []
-                data, guid_gpt = self.get_gpt(lun, int(0), int(0), int(0))
-                if guid_gpt is None:
+                header_data_a, guid_gpt_a = self.get_gpt(lun_a, int(0), int(0), int(0))
+                if guid_gpt_a is None:
                     break
                 else:
-                    for partitionname in guid_gpt.partentries:
-                        gp = gpt()
-                        slot = partitionname.lower()[-2:]
-                        if "_a" in slot or "_b" in slot:
-                            pdata, poffset = gp.patch(data, partitionname, active=partslots[slot])
-                            data[poffset:poffset + len(pdata)] = pdata
-                            wdata = gp.fix_gpt_crc(data)
-                            if wdata is not None:
-                                start_sector_patch = poffset // self.cfg.SECTOR_SIZE_IN_BYTES
-                                byte_offset_patch = poffset % self.cfg.SECTOR_SIZE_IN_BYTES
-                                headeroffset = gp.header.current_lba * gp.sectorsize
-                                start_sector_hdr = headeroffset // self.cfg.SECTOR_SIZE_IN_BYTES
-                                header = wdata[start_sector_hdr:start_sector_hdr + gp.header.header_size]
-                                cmd_patch_multiple(lun, start_sector_patch, byte_offset_patch, headeroffset, pdata, header)
+                    for partitionname_a in guid_gpt_a.partentries:
+                        slot = partitionname_a.lower()[-2:]
+                        if slot == "_a":
+                            partitionname_b = partitionname_a[:-1] + "b"
+                            if partitionname_b in guid_gpt_a.partentries:
+                                lun_b = lun_a
+                                header_data_b = header_data_a
+                                guid_gpt_b = guid_gpt_a
+                            else:
+                                resp = self.detect_partition(arguments=None, partitionname=partitionname_b, send_full=True)
+                                if not resp[0]:
+                                    self.error(f"Cannot find partition {partitionname_b}")
+                                    return False
+                                _, lun_b, header_data_b, guid_gpt_b = resp
+
+                            part_a = guid_gpt_a.partentries[partitionname_a]
+                            part_b = guid_gpt_b.partentries[partitionname_b]
+                            is_boot = False
+                            if partitionname_a == "boot_a":
+                                is_boot = True
+                            pdata_a, poffset_a, pdata_b, poffset_b = patch_helper(
+                                header_data_a, header_data_b,
+                                guid_gpt_a, part_a, part_b,
+                                slot_a_status, slot_b_status,
+                                is_boot
+                            )
+
+                            header_data_a[poffset_a : poffset_a+len(pdata_a)] = pdata_a
+                            new_header_a = guid_gpt_a.fix_gpt_crc(header_data_a)
+                            header_data_b[poffset_b:poffset_b + len(pdata_b)] = pdata_b
+                            new_header_b = guid_gpt_b.fix_gpt_crc(header_data_b)
+
+                            if new_header_a is not None:
+                                start_sector_patch_a = poffset_a // self.cfg.SECTOR_SIZE_IN_BYTES
+                                byte_offset_patch_a = poffset_a % self.cfg.SECTOR_SIZE_IN_BYTES
+                                cmd_patch_multiple(lun_a, start_sector_patch_a, byte_offset_patch_a, pdata_a)
+
+                                # header will be updated in partitionname_b if in same lun
+                                if lun_a != lun_b:
+                                    headeroffset_a = guid_gpt_a.header.current_lba * guid_gpt_a.sectorsize
+                                    start_sector_hdr_a = guid_gpt_a.header.current_lba
+                                    pheader_a = new_header_a[headeroffset_a : headeroffset_a+guid_gpt_a.header.header_size]
+                                    cmd_patch_multiple(lun_a, start_sector_hdr_a, 0, pheader_a)
+
+                            if new_header_b is not None:
+                                start_sector_patch_b = poffset_b // self.cfg.SECTOR_SIZE_IN_BYTES
+                                byte_offset_patch_b = poffset_b % self.cfg.SECTOR_SIZE_IN_BYTES
+                                cmd_patch_multiple(lun_b, start_sector_patch_b, byte_offset_patch_b, pdata_b)
+
+                                headeroffset_b = guid_gpt_b.header.current_lba * guid_gpt_b.sectorsize
+                                start_sector_hdr_b = guid_gpt_b.header.current_lba
+                                pheader_b = new_header_b[headeroffset_b : headeroffset_b+guid_gpt_b.header.header_size]
+                                cmd_patch_multiple(lun_b, start_sector_hdr_b, 0, pheader_b)
         except Exception as err:
             self.error(str(err))
             return False
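Aside (not part of the commit): a small worked example of the attribute-flag arithmetic that set_flags performs for a boot slot being activated. The constant values mirror the ones the gpt.py hunk below introduces; the printed value is only an illustration of the bit layout.

    # Illustration of the A/B attribute bit layout (values mirror the constants added in gpt.py)
    PART_ATT_PRIORITY_BIT = 48
    PART_ATT_ACTIVE_BIT = 50
    PART_ATT_MAX_RETRY_CNT_BIT = 51
    PART_ATT_SUCCESS_BIT = 54
    PART_ATT_UNBOOTABLE_BIT = 55

    PART_ATT_PRIORITY_VAL = 0x3 << PART_ATT_PRIORITY_BIT
    PART_ATT_ACTIVE_VAL = 0x1 << PART_ATT_ACTIVE_BIT
    PART_ATT_MAX_RETRY_COUNT_VAL = 0x7 << PART_ATT_MAX_RETRY_CNT_BIT
    PART_ATT_SUCCESSFUL_VAL = 0x1 << PART_ATT_SUCCESS_BIT
    PART_ATT_UNBOOTABLE_VAL = 0x1 << PART_ATT_UNBOOTABLE_BIT

    flags = 0
    # Activate a boot slot: max priority, active, full retry count; clear successful/unbootable
    flags |= PART_ATT_PRIORITY_VAL | PART_ATT_ACTIVE_VAL | PART_ATT_MAX_RETRY_COUNT_VAL
    flags &= ~PART_ATT_SUCCESSFUL_VAL & ~PART_ATT_UNBOOTABLE_VAL
    print(hex(flags))  # 0x3f000000000000 -> priority=3, active=1, tries_remaining=7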
edlclient/Library/gpt.py

@@ -194,6 +194,20 @@ AB_SLOT_ACTIVE = 1
 AB_SLOT_INACTIVE = 0
 
+
+PART_ATT_PRIORITY_BIT = 48
+PART_ATT_ACTIVE_BIT = 50
+PART_ATT_MAX_RETRY_CNT_BIT = 51
+MAX_PRIORITY = 3
+PART_ATT_SUCCESS_BIT = 54
+PART_ATT_UNBOOTABLE_BIT = 55
+
+PART_ATT_PRIORITY_VAL = 0x3 << PART_ATT_PRIORITY_BIT
+PART_ATT_ACTIVE_VAL = 0x1 << PART_ATT_ACTIVE_BIT
+PART_ATT_MAX_RETRY_COUNT_VAL = 0x7 << PART_ATT_MAX_RETRY_CNT_BIT
+PART_ATT_SUCCESSFUL_VAL = 0x1 << PART_ATT_SUCCESS_BIT
+PART_ATT_UNBOOTABLE_VAL = 0x1 << PART_ATT_UNBOOTABLE_BIT
+
 
 class gpt(metaclass=LogBase):
     class gpt_header:
         def __init__(self, data):
@@ -482,32 +496,6 @@ class gpt(metaclass=LogBase):
         res = self.print_gptfile(os.path.join("TestFiles", "gpt_sm8180x.bin"))
         assert res, "GPT Partition wasn't decoded properly"
 
-    def patch(self, data:bytes, partitionname="boot", active: bool = True):
-        try:
-            rf = BytesIO(data)
-            for sectorsize in [512, 4096]:
-                result = self.parse(data, sectorsize)
-                if result:
-                    for rname in self.partentries:
-                        if partitionname.lower() == rname.lower():
-                            partition = self.partentries[rname]
-                            rf.seek(partition.entryoffset)
-                            sdata = rf.read(self.header.part_entry_size)
-                            partentry = self.gpt_partition(sdata)
-                            flags = partentry.flags
-                            if active:
-                                flags |= AB_PARTITION_ATTR_SLOT_ACTIVE << (AB_FLAG_OFFSET*8)
-                            else:
-                                flags &= ~(AB_PARTITION_ATTR_SLOT_ACTIVE << (AB_FLAG_OFFSET*8))
-                            partentry.flags = flags
-                            pdata = partentry.create()
-                            return pdata, partition.entryoffset
-                    break
-            return None, None
-        except Exception as e:
-            self.error(str(e))
-            return None, None
-
     def fix_gpt_crc(self, data):
         partentry_size = self.header.num_part_entries * self.header.part_entry_size
         partentry_offset = self.header.part_entry_start_lba * self.sectorsize
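Aside (not part of the commit): the write-back path in cmd_setactiveslot splits each patch target into a sector index and an in-sector byte offset before handing it to cmd_patch_multiple. A small sketch of that arithmetic; the 4096-byte sector size is an assumption for the example, in practice it comes from self.cfg.SECTOR_SIZE_IN_BYTES.

    # Assumed sector size for illustration; the real value comes from self.cfg.SECTOR_SIZE_IN_BYTES
    SECTOR_SIZE_IN_BYTES = 4096

    poffset = 0x6080  # hypothetical absolute byte offset of a GPT partition entry
    start_sector_patch = poffset // SECTOR_SIZE_IN_BYTES  # 6: sector that holds the entry
    byte_offset_patch = poffset % SECTOR_SIZE_IN_BYTES    # 0x80: offset inside that sector

    # cmd_patch_multiple then walks the patched entry four bytes at a time, issuing one
    # patch per little-endian 32-bit word at (start_sector_patch, byte_offset_patch + i).
    print(start_sector_patch, hex(byte_offset_patch))  # 6 0x80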