[coreboot-gerrit] Change in ...coreboot[master]: qcs405 [temp]: Combine BB with QC-Sec for ROM boot

Name of user not set (Code Review) gerrit at coreboot.org
Fri Nov 30 11:14:14 CET 2018


nsekar at codeaurora.org has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/29973 )


Change subject: qcs405 [temp]: Combine BB with QC-Sec for ROM boot
......................................................................

qcs405 [temp]: Combine BB with QC-Sec for ROM boot

Some of the changes in this patch overlap with the SDM845 upstream
patches and are only needed here until those patches are merged.
After that, the remaining delta will have to be split out and
pushed as separate changes.

TEST=build & run

Change-Id: Ief4d92214cdc7ec06e90b0c7e73c11b6d6deddb9
Signed-off-by: Sricharan R <sricharan at codeaurora.org>
Signed-off-by: Nitheesh Sekar <nsekar at codeaurora.org>
---
M src/arch/arm64/armv8/cpu.S
M src/soc/qualcomm/ipq40xx/Kconfig
M src/soc/qualcomm/ipq40xx/mbn_header.h
M src/soc/qualcomm/ipq806x/Makefile.inc
M src/soc/qualcomm/ipq806x/mbn_header.h
M src/soc/qualcomm/qcs405/Makefile.inc
M src/soc/qualcomm/sdm845/Makefile.inc
R util/qualcomm/createxbl.py
M util/qualcomm/description.md
R util/qualcomm/ipqheader.py
R util/qualcomm/mbn_tools.py
R util/qualcomm/mbncat.py
A util/qualcomm/qgpt.py
M util/qualcomm/scripts/cmm/debug_cb_405.cmm
M util/qualcomm/scripts/cmm/debug_cb_845.cmm
M util/qualcomm/scripts/cmm/debug_cb_common.cmm
M util/qualcomm/scripts/cmm/pbl32_to_bootblock64_jump.cmm
17 files changed, 483 insertions(+), 224 deletions(-)



  git pull ssh://review.coreboot.org:29418/coreboot refs/changes/73/29973/1

diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index d3fd9d3..038e711 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -148,7 +148,7 @@
 2:
 	stp	x0, x0, [x1], #16
 	cmp	x1, x2
-	bne	2b
+	ble	2b
 
 	/* Initialize stack with sentinel value to later check overflow. */
 	ldr	x2, =0xdeadbeefdeadbeef
diff --git a/src/soc/qualcomm/ipq40xx/Kconfig b/src/soc/qualcomm/ipq40xx/Kconfig
index 2cfcd2d..849d306 100644
--- a/src/soc/qualcomm/ipq40xx/Kconfig
+++ b/src/soc/qualcomm/ipq40xx/Kconfig
@@ -48,7 +48,7 @@
 config SBL_UTIL_PATH
 	depends on USE_BLOBS
 	string "Path for utils to combine SBL_ELF and bootblock"
-	default "util/ipqheader"
+	default "util/qualcomm"
 	help
 	  Path for utils to combine SBL_ELF and bootblock
 
diff --git a/src/soc/qualcomm/ipq40xx/mbn_header.h b/src/soc/qualcomm/ipq40xx/mbn_header.h
index cedcf12..a48de1c 100644
--- a/src/soc/qualcomm/ipq40xx/mbn_header.h
+++ b/src/soc/qualcomm/ipq40xx/mbn_header.h
@@ -18,7 +18,7 @@
 
 #include <types.h>
 
-/* QCA firmware blob header gleaned from util/ipqheader/ipqheader.py */
+/* QCA firmware blob header gleaned from util/qualcomm/ipqheader.py */
 
 struct mbn_header {
 	u32	mbn_type;
diff --git a/src/soc/qualcomm/ipq806x/Makefile.inc b/src/soc/qualcomm/ipq806x/Makefile.inc
index 8a428b2..1fd134a 100644
--- a/src/soc/qualcomm/ipq806x/Makefile.inc
+++ b/src/soc/qualcomm/ipq806x/Makefile.inc
@@ -62,14 +62,14 @@
 # Add MBN header to allow SBL3 to start coreboot bootblock
 $(objcbfs)/bootblock.mbn: $(objcbfs)/bootblock.raw.bin
 	@printf "    ADD MBN    $(subst $(obj)/,,$(@))\n"
-	./util/ipqheader/ipqheader.py $(call loadaddr,bootblock) $< $@.tmp
+	./util/qualcomm/ipqheader.py $(call loadaddr,bootblock) $< $@.tmp
 	@mv $@.tmp $@
 
 # Create a complete bootblock which will start up the system
 $(objcbfs)/bootblock.bin: $(call strip_quotes,$(CONFIG_SBL_BLOB)) \
 			   $(objcbfs)/bootblock.mbn
 	@printf "    MBNCAT     $(subst $(obj)/,,$(@))\n"
-	@util/ipqheader/mbncat.py -o $@.tmp $^
+	@util/qualcomm/mbncat.py -o $@.tmp $^
 	@mv $@.tmp $@
 
 endif
diff --git a/src/soc/qualcomm/ipq806x/mbn_header.h b/src/soc/qualcomm/ipq806x/mbn_header.h
index 1e6a32f..c7b38d3 100644
--- a/src/soc/qualcomm/ipq806x/mbn_header.h
+++ b/src/soc/qualcomm/ipq806x/mbn_header.h
@@ -18,7 +18,7 @@
 
 #include <types.h>
 
-/* Qualcomm firmware blob header gleaned from util/ipqheader/ipqheader.py */
+/* Qualcomm firmware blob header gleaned from util/qualcomm/ipqheader.py */
 
 struct mbn_header {
 	u32	mbn_type;
diff --git a/src/soc/qualcomm/qcs405/Makefile.inc b/src/soc/qualcomm/qcs405/Makefile.inc
index 47ec7ca..40dcb9b 100644
--- a/src/soc/qualcomm/qcs405/Makefile.inc
+++ b/src/soc/qualcomm/qcs405/Makefile.inc
@@ -90,7 +90,7 @@
 qc_sec_file := $(shell ls $(QC_SEC_FILE))
 ifneq (,$(findstring $(QC_SEC_FILE),$(qc_sec_file)))
 $(objcbfs)/bootblock.bin: $(objcbfs)/bootblock.elf
-	@util/qualcomm/createxbl.py -f $(objcbfs)/bootblock.elf \
+	@python util/qualcomm/createxbl.py -f $(objcbfs)/bootblock.elf \
 		-x $(QC_SEC_FILE) -o $(objcbfs)/merged_bb_qcsec.mbn \
 		-a 64 -d 32 -c 32
 ifeq ($(CONFIG_QC_FLASH_SIMULATE_SDCARD),y)
diff --git a/src/soc/qualcomm/sdm845/Makefile.inc b/src/soc/qualcomm/sdm845/Makefile.inc
index 507d913..6f3f3bf 100644
--- a/src/soc/qualcomm/sdm845/Makefile.inc
+++ b/src/soc/qualcomm/sdm845/Makefile.inc
@@ -31,8 +31,22 @@
 
 CPPFLAGS_common += -Isrc/soc/qualcomm/sdm845/include
 
+SDM845_BLOB := $(top)/3rdparty/blobs/soc/qualcomm/sdm845
+
+################################################################################
+QC_SEC_FILE := $(SDM845_BLOB)/qc_sec.mbn
+qc_sec_file := $(shell ls $(QC_SEC_FILE))
+ifneq (,$(findstring $(QC_SEC_FILE),$(qc_sec_file)))
+$(objcbfs)/bootblock.bin: $(objcbfs)/bootblock.elf
+	@util/qualcomm/createxbl.py -f $(objcbfs)/bootblock.elf \
+		-x $(QC_SEC_FILE) -o $(objcbfs)/merged_bb_qcsec.mbn \
+		-a 64 -d 64 -c 64
+	@util/qualcomm/qgpt.py $(objcbfs)/merged_bb_qcsec.mbn \
+		$(objcbfs)/bootblock.bin
+else
 $(objcbfs)/bootblock.bin: $(objcbfs)/bootblock.raw.bin
 	@printf "Generating: $(subst $(obj)/,,$(@))\n"
 	cp $(objcbfs)/bootblock.raw.bin $(objcbfs)/bootblock.bin
+endif
 
 endif
diff --git a/util/ipqheader/createxbl.py b/util/qualcomm/createxbl.py
similarity index 87%
rename from util/ipqheader/createxbl.py
rename to util/qualcomm/createxbl.py
index 1efd8ba..655b0ec 100755
--- a/util/ipqheader/createxbl.py
+++ b/util/qualcomm/createxbl.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python2
 #============================================================================
 #
 #/** @file createxbl.py
@@ -6,32 +5,9 @@
 # GENERAL DESCRIPTION
 #   Concatentates XBL segments into one ELF image
 #
-# Copyright (c) 2016, The Linux Foundation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of The Linux Foundation nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2014-2016 by QUALCOMM Technologies, Incorporated.
+# All Rights Reserved.
+# QUALCOMM Proprietary/GTDR
 #
 #**/
 #
@@ -44,7 +20,9 @@
 #
 # when       who       what, where, why
 # --------   ---       ------------------------------------------------------
-# 09/04/15   et        Added -x and -d to embed xbl_sec ELF
+# 04/16/16   et        Appended path to import correct mbn_tools
+# 11/09/15   plc       Update pflag for xblsec segment
+# 09/04/15   et        Added -x and -d to embed xbl_sec ELF 
 # 02/11/15   ck        Fixed missing elf type check in ZI OOB feature
 # 11/04/14   ck        Updated calls to mbn_tools functions
 # 10/22/14   ck        Added -z option to remove out of bounds ZI segments when converting from 64 to 32
@@ -60,15 +38,19 @@
 import os
 import sys
 import shutil
+
+# Add path to mbn_tools
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                'sectools','sectools','features','isc','parsegen'))
 import mbn_tools
 
-PAGE_SIZE       = 4096
-SEGMENT_ALIGN   = 16
-ELF32_HDR_SIZE  = 52
-ELF32_PHDR_SIZE = 32
-ELF64_HDR_SIZE  = 64
-ELF64_PHDR_SIZE = 56
-
+PAGE_SIZE              = 4096
+SEGMENT_ALIGN          = 16
+ELF32_HDR_SIZE         = 52
+ELF32_PHDR_SIZE        = 32
+ELF64_HDR_SIZE         = 64
+ELF64_PHDR_SIZE        = 56
+SEGMENT_ALIGN_4K       = 4096
 
 ##############################################################################
 # main
@@ -83,7 +65,7 @@
   parser.add_option("-s", "--second_filepath",
                     action="store", type="string", dest="elf_inp_file2",
                     help="Second ELF file to merge.")
-
+  
   parser.add_option("-x", "--xbl_sec_filepath",
                     action="store", type="string", dest="elf_inp_xbl_sec",
                     help="Second ELF file to merge.")
@@ -99,7 +81,7 @@
   parser.add_option("-b", "--second_elf_arch",
                     action="store", type="string", dest="elf_2_arch",
                     help="Second ELF file architecture.  '32' or '64'")
-
+  
   parser.add_option("-d", "--xbl_sec_elf_arch",
                     action="store", type="string", dest="elf_xbl_sec_arch",
                     help="xbl_sec file architecture.  '32' or '64'")
@@ -118,7 +100,7 @@
                     help="Removes ZI segments that have addresses greater" + \
                          " than 32 bits when converting from a 64 to 32 bit ELF")
 
-
+  
   (options, args) = parser.parse_args()
   if not options.elf_inp_file1:
     parser.error('First ELF filename not given')
@@ -128,7 +110,7 @@
 
   if not options.elf_1_arch:
     parser.error('First ELF architecture not given')
-
+  
   if (not options.elf_1_arch == '64') and (not options.elf_1_arch == '32'):
     parser.error('Invalid First ELF architecture given')
 
@@ -136,7 +118,7 @@
   if options.elf_inp_file2:
     if (not options.elf_2_arch == '64') and (not options.elf_2_arch == '32'):
       parser.error('Invalid Second ELF architecture given')
-
+  
   # Only evaluate elf_xbl_sec_arch if file is given
   if options.elf_inp_xbl_sec:
     if (not options.elf_xbl_sec_arch == '64') and (not options.elf_xbl_sec_arch == '32'):
@@ -159,7 +141,7 @@
     elf_inp_file2 = options.elf_inp_file2
   else:
     elf_inp_file2 = ""
-
+    
   # Do same for xbl_sec
   elf_inp_xbl_sec = options.elf_inp_xbl_sec if options.elf_inp_xbl_sec else ""
 
@@ -179,7 +161,7 @@
       is_elf2_64_bit = False
   else:
     is_elf2_64_bit = False
-
+  
   if options.elf_inp_xbl_sec:
     if options.elf_xbl_sec_arch == '64':
       is_elf_xbl_sec_64_bit = True
@@ -187,7 +169,7 @@
       is_elf_xbl_sec_64_bit = False
   else:
     is_elf_xbl_sec_64_bit = False
-
+  
   # If output ELF arch is given then set is_out_elf_64_bit accordingly.
   # If not then default to be input1's setting
   if options.elf_out_arch:
@@ -207,7 +189,7 @@
 
 
   mbn_type = 'elf'
-  header_format = 'reg'
+  header_format = 'reg' 
   gen_dict['IMAGE_KEY_IMAGE_ID'] = mbn_tools.ImageType.APPSBL_IMG
   #gen_dict['IMAGE_KEY_IMAGE_SOURCE'] = 0
   #gen_dict['IMAGE_KEY_IMAGE_DEST'] = 0
@@ -224,9 +206,9 @@
   target_nonsec = target_base + "_combined_hash.mbn"
 
 
-  #print "Input file 1:", elf_inp_file1
-  #print "Input file 2:", elf_inp_file2
-  #print "Output file:", binary_out
+  print "Input file 1:", elf_inp_file1
+  print "Input file 2:", elf_inp_file2
+  print "Output file:", binary_out
 
   merge_elfs([],
              elf_inp_file1,
@@ -238,7 +220,7 @@
        is_elf_xbl_sec_64_bit,
 	     is_out_elf_64_bit,
 	     zi_oob_enabled)
-
+  
 
   # Hash the image if user did not explicitly say not to
   if options.hash_image:
@@ -246,21 +228,21 @@
     shutil.move(merged_elf, binary_out)
   else:
     shutil.copy(merged_elf, source_elf)
-
-    # Create hash table
+  
+    # Create hash table 
     rv = mbn_tools.pboot_gen_elf([],
                                  source_elf,
-				 target_hash,
+				 target_hash, 
                                  elf_out_file_name = target_phdr_elf,
-                                 secure_type = image_header_secflag)
+                                 secure_type = image_header_secflag)        
     if rv:
        raise RuntimeError, "Failed to run pboot_gen_elf"
 
     # Create hash table header
-    rv = mbn_tools.image_header([],
+    rv = mbn_tools.image_header(os.environ,
                                 gen_dict,
 				target_hash,
-				target_hash_hd,
+				target_hash_hd, 
                          	image_header_secflag,
 				elf_file_name = source_elf)
     if rv:
@@ -269,7 +251,7 @@
     files_to_cat_in_order = [target_hash_hd, target_hash]
     mbn_tools.concat_files (target_nonsec, files_to_cat_in_order)
 
-    # Add the hash segment into the ELF
+    # Add the hash segment into the ELF 
     mbn_tools.pboot_add_hash([],
                              target_phdr_elf,
                              target_nonsec,
@@ -287,7 +269,7 @@
 ##############################################################################
 # merge_elfs
 ##############################################################################
-def merge_elfs(env,
+def merge_elfs(env, 
                elf_in_file_name1,
                elf_in_file_name2,
                elf_in_file_xbl_sec,
@@ -299,17 +281,17 @@
 	             zi_oob_enabled):
 
   [elf_header1, phdr_table1] = \
-    mbn_tools.preprocess_elf_file(elf_in_file_name1)
+    mbn_tools.preprocess_elf_file(elf_in_file_name1) 
 
   # Check to make sure second file path exists before using
   if elf_in_file_name2 != "":
     [elf_header2, phdr_table2] = \
-      mbn_tools.preprocess_elf_file(elf_in_file_name2)
-
+      mbn_tools.preprocess_elf_file(elf_in_file_name2) 
+      
   # Check to make sure xbl_sec file path exists before using
   if elf_in_file_xbl_sec != "":
     [elf_headerxblsec, phdr_tablexblsec] = \
-      mbn_tools.preprocess_elf_file(elf_in_file_xbl_sec)
+      mbn_tools.preprocess_elf_file(elf_in_file_xbl_sec) 
 
   # Open Files
   elf_in_fp1 = mbn_tools.OPEN(elf_in_file_name1, "rb")
@@ -340,7 +322,7 @@
     else:
       phdr_total_size += elf_header2.e_phnum * ELF32_PHDR_SIZE
       phdr_total_count += elf_header2.e_phnum
-
+  
   # Account for xbl_sec header if included
   if elf_in_file_xbl_sec != "":
     phdr_total_count += 1
@@ -379,7 +361,7 @@
                                  '\x00' + \
   	                         '\x00' + \
                                  ('\x00' * 7))
-
+   
     # Address needs to be verified that it is not greater than 32 bits
     # as it is possible to go from a 64 bit elf to 32.
     if (elf_header1.e_entry > 0xFFFFFFFF):
@@ -415,7 +397,7 @@
             phdr_total_count = phdr_total_count - 1
     # Do not include xbl_sec in above calculation
     # xbl_sec is to be treated as a single blob
-
+    
 
   # Now it is ok to populate the ELF header and write it out
   out_elf_header.e_phnum = phdr_total_count
@@ -492,9 +474,9 @@
         exit()
       new_phdr.p_align  = curr_phdr.p_align
 
-
-    #print "i=",i
-    #print "phdr_offset=", phdr_offset
+    
+    print "i=",i
+    print "phdr_offset=", phdr_offset
 
     # update output file location to next phdr location
     elf_out_fp.seek(phdr_offset)
@@ -502,15 +484,12 @@
     phdr_offset += out_elf_header.e_phentsize
 
     inp_data_offset = curr_phdr.p_offset # used to read data from input file
-
-#    print "inp_data_offset="
-#    print inp_data_offset
-#
-#    print "curr_phdr.p_offset="
-#    print curr_phdr.p_offset
-#
-#    print "curr_phdr.p_filesz="
-#    print curr_phdr.p_filesz
+    print "inp_data_offset="
+    print inp_data_offset
+    print "curr_phdr.p_offset="
+    print curr_phdr.p_offset
+    print "curr_phdr.p_filesz="
+    print curr_phdr.p_filesz
 
     # output current phdr
     if is_out_elf_64_bit == False:
@@ -525,8 +504,14 @@
                  new_phdr.p_offset,
                                                new_phdr.p_filesz)
 
+    # Update segment alignment value if applicable
+    if new_phdr.p_align == SEGMENT_ALIGN_4K:
+      local_align = SEGMENT_ALIGN
+    else:
+      local_align = new_phdr.p_align
+    
     # update data segment offset to be aligned after previous segment
-    segment_offset += roundup(new_phdr.p_filesz, SEGMENT_ALIGN);
+    segment_offset += roundup(new_phdr.p_filesz, local_align);
   elf_in_fp1.close()
 
   # Output second elf data if applicable
@@ -590,9 +575,10 @@
           exit()
         new_phdr.p_align  = curr_phdr.p_align
 
-
-#     print "i=",i
-#     print "phdr_offset=", phdr_offset
+	print "i"
+	print i
+	print "phdr_offset="
+	print phdr_offset
 
       # update output file location to next phdr location
       elf_out_fp.seek(phdr_offset)
@@ -600,15 +586,12 @@
       phdr_offset += out_elf_header.e_phentsize
 
       inp_data_offset = curr_phdr.p_offset # used to read data from input file
-
-#     print "inp_data_offset="
-#     print inp_data_offset
-#
-#     print "curr_phdr.p_offset="
-#     print curr_phdr.p_offset
-#
-#     print "curr_phdr.p_filesz="
-#     print curr_phdr.p_filesz
+      print "inp_data_offset="
+      print inp_data_offset
+      print "curr_phdr.p_offset="
+      print curr_phdr.p_offset
+      print "curr_phdr.p_filesz="
+      print curr_phdr.p_filesz
 
       # output current phdr
       if is_out_elf_64_bit == False:
@@ -623,28 +606,38 @@
                                                  new_phdr.p_offset,
                                                  new_phdr.p_filesz)
 
+      # Update segment alignment value if applicable
+      if new_phdr.p_align == SEGMENT_ALIGN_4K:
+        local_align = SEGMENT_ALIGN
+      else:
+        local_align = new_phdr.p_align
+    
       # update data segment offset to be aligned after previous segment
-      segment_offset += roundup(new_phdr.p_filesz, SEGMENT_ALIGN);
+      segment_offset += roundup(new_phdr.p_filesz, local_align);
     elf_in_fp2.close()
-
+    
   # Embed xbl_sec image if provided
   if elf_in_file_xbl_sec != "":
-
+    
     # Scan pheaders in xbl_sec for segment that contains entry point address
     entry_seg_offset = -1
     entry_addr = elf_headerxblsec.e_entry
+    start_addr = -1
     for i in range(elf_headerxblsec.e_phnum):
       phdr = phdr_tablexblsec[i]
-      max_addr = phdr.p_vaddr + phdr.p_memsz
+      max_addr = phdr.p_vaddr + phdr.p_memsz - 1
       if phdr.p_vaddr <= entry_addr <= max_addr:
         entry_seg_offset = phdr.p_offset
+	start_addr = phdr.p_vaddr
         break
     if entry_seg_offset == -1:
       print "Error: Failed to find entry point in any segment!"
       exit()
     # magical equation for program header's phys and virt addr
-    phys_virt_addr = entry_addr - entry_seg_offset
-
+    # phys_virt_addr = entry_addr - entry_seg_offset
+    phys_virt_addr = start_addr - entry_seg_offset
+    print "entry_addr " + str(hex(entry_addr)) + "entry_seg_offset " + str(hex(start_addr)) + "phys_virt_addr " + str(hex(phys_virt_addr))
+    
     if is_out_elf_64_bit:
       # Converting from 32 to 64 elf requires no data size validation
       new_phdr = mbn_tools.Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
@@ -654,7 +647,7 @@
       new_phdr.p_paddr  = phys_virt_addr
       new_phdr.p_filesz = os.path.getsize(elf_in_file_xbl_sec)
       new_phdr.p_memsz  = new_phdr.p_filesz
-      new_phdr.p_flags  = 0x5
+      new_phdr.p_flags  = 0x5 | (mbn_tools.MI_PBT_SWAPPED_SEGMENT << mbn_tools.MI_PBT_FLAG_SEGMENT_TYPE_SHIFT);
       new_phdr.p_align  = 0x1000
     else:
       # Converting from 64 to 32 elf requires data size validation
@@ -663,7 +656,7 @@
       new_phdr = mbn_tools.Elf32_Phdr('\0' * ELF32_PHDR_SIZE)
       new_phdr.p_type   = 0x1 #
       new_phdr.p_offset = segment_offset
-      new_phdr.p_flags  = 0x5
+      new_phdr.p_flags  = 0x5 | (mbn_tools.MI_PBT_SWAPPED_SEGMENT << mbn_tools.MI_PBT_FLAG_SEGMENT_TYPE_SHIFT);
       new_phdr.p_align  = 0x1000
 
       if phys_virt_addr > 0xFFFFFFFF:
@@ -678,22 +671,22 @@
         exit()
       new_phdr.p_filesz = os.path.getsize(elf_in_file_xbl_sec)
       new_phdr.p_memsz  = new_phdr.p_filesz
-
-
+      
+    
     # update output file location to next phdr location
     elf_out_fp.seek(phdr_offset)
     # increment phdr_offset to next location
     phdr_offset += out_elf_header.e_phentsize
     # Copy entire xbl_sec file, so start from byte 0
-    inp_data_offset = 0
-
+    inp_data_offset = 0 
+    
     # Output xbl_sec's phdr
     elf_in_file_xbl_sec
     if is_out_elf_64_bit == False:
       elf_out_fp.write(mbn_tools.Elf32_Phdr.getPackedData(new_phdr))
     else:
       elf_out_fp.write(mbn_tools.Elf64_Phdr.getPackedData(new_phdr))
-
+      
     # Copy the ENTIRE xbl_sec image
     bytes_written = mbn_tools.file_copy_offset(elf_in_fpxblsec,
                                                inp_data_offset,
@@ -702,10 +695,17 @@
                                                new_phdr.p_filesz)
     # update data segment offset to be aligned after previous segment
     # Not necessary, unless appending more pheaders after this point
-    segment_offset += roundup(new_phdr.p_filesz, SEGMENT_ALIGN);
 
+    # Update segment alignment value if applicable
+    if new_phdr.p_align == SEGMENT_ALIGN_4K:
+      local_align = SEGMENT_ALIGN
+    else:
+      local_align = new_phdr.p_align
+
+    segment_offset += roundup(new_phdr.p_filesz, local_align);
+    
     elf_in_fpxblsec.close()
-
+  
   elf_out_fp.close()
 
   return 0
diff --git a/util/qualcomm/description.md b/util/qualcomm/description.md
old mode 100644
new mode 100755
diff --git a/util/ipqheader/ipqheader.py b/util/qualcomm/ipqheader.py
similarity index 100%
rename from util/ipqheader/ipqheader.py
rename to util/qualcomm/ipqheader.py
diff --git a/util/ipqheader/mbn_tools.py b/util/qualcomm/mbn_tools.py
similarity index 94%
rename from util/ipqheader/mbn_tools.py
rename to util/qualcomm/mbn_tools.py
index c66afda..a144b23 100755
--- a/util/ipqheader/mbn_tools.py
+++ b/util/qualcomm/mbn_tools.py
@@ -1,38 +1,15 @@
-#!/usr/bin/env python2
+#==============================================================================
+#
+#  Copyright (c) 2010-2013,2017 Qualcomm Technologies, Inc.
+#  All Rights Reserved.
+#  Confidential and Proprietary - Qualcomm Technologies, Inc.
+#
 #===============================================================================
 #
 # MBN TOOLS
 #
 # GENERAL DESCRIPTION
 #    Contains all MBN Utilities for image generation
-#
-# Copyright (c) 2016, The Linux Foundation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of The Linux Foundation nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
 #-------------------------------------------------------------------------------
 #                      EDIT HISTORY FOR FILE
 #
@@ -52,13 +29,12 @@
 # 10/20/11   dxiang  Clean up
 #===============================================================================
 
-import stat
-import csv
-import itertools
-import struct
-import os
-import shutil
+import copy
 import hashlib
+import os
+import re
+import shutil
+import struct
 
 #----------------------------------------------------------------------------
 # GLOBAL VARIABLES BEGIN
@@ -311,7 +287,7 @@
     1) 'X's  | char *         | string      | 'X' bytes
     2) H     | unsigned short | integer     | 2 bytes
     3) I     | unsigned int   | integer     | 4 bytes
-
+    4) Q     | unsigned long  | integer     | 8 bytes
 """
 
 #----------------------------------------------------------------------------
@@ -989,6 +965,12 @@
       boot_header.cert_chain_ptr = image_dest + code_size + signature_size
       boot_header.cert_chain_size = cert_chain_size
 
+      # If platform image integrity check is enabled
+      if 'USES_PLATFORM_IMAGE_INTEGRITY_CHECK' in env:
+          boot_header.flash_parti_ver = 5   # version
+          boot_header.image_src = 0         # sig_size_qti
+          boot_header.image_dest_ptr = 0    # cert_chain_size_qti
+
       # If preamble is required, output the preamble file and update the boot_header
       if requires_preamble is True:
          boot_header = image_preamble(gen_dict, preamble_file_name, boot_header, num_of_pages)
@@ -1005,14 +987,16 @@
 # pboot_gen_elf
 #----------------------------------------------------------------------------
 def pboot_gen_elf(env, elf_in_file_name,
-                       hash_out_file_name,
-                       elf_out_file_name,
-                       secure_type = 'non_secure',
-                       hash_seg_max_size = None,
-                       last_phys_addr = None,
-                       append_xml_hdr = False,
-                       is_sha256_algo = True,
-                       cert_chain_size_in = CERT_CHAIN_ONEROOT_MAXSIZE):
+                  hash_out_file_name,
+                  elf_out_file_name,
+                  secure_type='non_secure',
+                  hash_seg_max_size=None,
+                  last_phys_addr=None,
+                  append_xml_hdr=False,
+                  is_sha256_algo=True,
+                  cert_chain_size_in=CERT_CHAIN_ONEROOT_MAXSIZE,
+                  hash_pageseg_as_segment=False):
+
    global MI_PROG_BOOT_DIGEST_SIZE
    if (is_sha256_algo is True):
       MI_PROG_BOOT_DIGEST_SIZE = 32
@@ -1090,26 +1074,41 @@
 
          off = seg_offset + seg_size
 
-         while seg_offset < off:
+         # Add a single hash table entry for pageable segment
+         if hash_pageseg_as_segment:
+             elf_in_fp.seek(seg_offset)
+             fbuf = elf_in_fp.read(seg_size)
 
-            if seg_offset < ELF_BLOCK_ALIGN:
-               hash_size = seg_offset
-            else:
-               hash_size = ELF_BLOCK_ALIGN
+             if MI_PBT_CHECK_FLAG_TYPE(curr_phdr.p_flags) is True:
+                 hash = generate_hash(fbuf, is_sha256_algo)
+             else:
+                 hash = '\0' * MI_PROG_BOOT_DIGEST_SIZE
 
-            elf_in_fp.seek(seg_offset)
-            fbuf = elf_in_fp.read(hash_size)
+             # Write hash to file
+             hash_out_fp.write(hash)
+             hashtable_size += MI_PROG_BOOT_DIGEST_SIZE
+         # Add a hash table entry for each block of pageable segment
+         else:
+             while seg_offset < off:
 
-            if MI_PBT_CHECK_FLAG_TYPE(curr_phdr.p_flags) is True:
-               hash = generate_hash(fbuf, is_sha256_algo)
-            else:
-               hash = '\0' * MI_PROG_BOOT_DIGEST_SIZE
+                if seg_offset < ELF_BLOCK_ALIGN:
+                   hash_size = seg_offset
+                else:
+                   hash_size = ELF_BLOCK_ALIGN
 
-            # Write hash to file
-            hash_out_fp.write(hash)
+                elf_in_fp.seek(seg_offset)
+                fbuf = elf_in_fp.read(hash_size)
 
-            hashtable_size += MI_PROG_BOOT_DIGEST_SIZE
-            seg_offset += ELF_BLOCK_ALIGN
+                if MI_PBT_CHECK_FLAG_TYPE(curr_phdr.p_flags) is True:
+                   hash = generate_hash(fbuf, is_sha256_algo)
+                else:
+                   hash = '\0' * MI_PROG_BOOT_DIGEST_SIZE
+
+                # Write hash to file
+                hash_out_fp.write(hash)
+
+                hashtable_size += MI_PROG_BOOT_DIGEST_SIZE
+                seg_offset += ELF_BLOCK_ALIGN
 
       # Copy the hash entry for all that are PAGED segments and those that are not the PHDR type. This is for
       # backward tool compatibility where some images are generated using older exe tools.
@@ -1165,15 +1164,15 @@
      bytes_to_pad = ELF_BLOCK_ALIGN - pad_hash_segment
      hash_seg_end = hash_tbl_end_addr + bytes_to_pad
 
-     # Check if a shifting is required to accommodate for the hash segment.
+     # Check if a shifting is required to accomodate for the hash segment.
      # Get the minimum offset by going through the program headers.
      # Note that the program headers in the input file do not contain
      # the dummy program header for ELF + Program header, and the
      # program header for the hashtable.
-     min_offset = phdr_table[0].p_offset
+     min_offset = hash_seg_end #start with the minimum needed
      for i in range(num_phdrs):
         curr_phdr = phdr_table[i]
-        if curr_phdr.p_offset < min_offset:
+        if curr_phdr.p_offset < min_offset and (curr_phdr.p_type != PHDR_TYPE): # discard entry of type PHDR which will have offset=0:
             min_offset = curr_phdr.p_offset
 
      if min_offset < hash_seg_end:
@@ -1443,6 +1442,11 @@
    [elf_header, phdr_table] = preprocess_elf_file(elf_in_file_name)
    segment_list = readSCL(scl_file_name, env['GLOBAL_DICT'])
 
+   if 'USES_FEATURE_DYNAMIC_LOADING' in env:
+      sub = 1
+   else:
+      sub = 0
+
    if elf_header.e_ident[ELFINFO_CLASS_INDEX] == ELFINFO_CLASS_64:
      curr_phdr = Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
      # Offset into program header where the p_flags field is stored
@@ -1456,14 +1460,14 @@
    elf_in_fp = OPEN(elf_in_file_name, "r+")
 
    # Check for corresponding number of segments
-   if len(segment_list) is not elf_header.e_phnum:
+   if len(segment_list) is not (elf_header.e_phnum -sub):
       raise RuntimeError, 'SCL file and ELF file have different number of segments!'
 
    # Go to the start of the p_flag entry in the first program header
    file_offset = elf_header.e_phoff + phdr_flag_off
 
    # Change each program header flag in the ELF file based off the SCL file
-   for i in range(elf_header.e_phnum):
+   for i in range(elf_header.e_phnum -sub):
       # Seek to correct location and create new p_flag value
       elf_in_fp.seek(file_offset)
       curr_phdr = phdr_table[i]
@@ -1810,8 +1814,8 @@
    # Get file names for 'cust' and 'targ' auto-generated files inside 'build/ms'
    cust_h = env.subst('CUST${BUILD_ID}.H').lower()
    targ_h = env.subst('TARG${BUILD_ID}.H').lower()
-   cust_file_name = str(env.FindFile(cust_h, "${INC_ROOT}/build/ms"))
-   targ_file_name = str(env.FindFile(targ_h, "${INC_ROOT}/build/ms"))
+   cust_file_name = str(env.FindFile(cust_h, "${INC_ROOT}/build/ms").abspath)
+   targ_file_name = str(env.FindFile(targ_h, "${INC_ROOT}/build/ms").abspath)
 
    # Check that files are present
    if (os.path.exists(cust_file_name) is True) and \
@@ -1834,45 +1838,36 @@
       raise RuntimeError, "At least 1 file must be specified as an input"
 
    global_dict = {}
-   Fields = ["Define", "Key", "Value"]
 
    # For each input file
    for i in range(len(args)):
+      with open(args[i]) as fp:
+         for line in fp:
+            temp = re.findall('\#define[ ]+(\w+)([ ]+[^\n]+)*', line.strip())
+            if (1 == len(temp)):
+               k, v = temp[0]
+               k = k.strip()
+               v = v.strip()
+               if 0 == len(v):
+                  # If value pair is empty string, assume feature definition is true
+                  global_dict[k] = 'yes'
+                  continue
 
-      template_file_path = args[i]
-      instream = OPEN(template_file_path, 'r')
-      # Tokenize each line with a white space
-      values = csv.DictReader(instream, Fields, delimiter=" ")
+               if global_dict is not None and len(global_dict.keys()) > 0:
+                  # Check for and handle text replacements as we parse
+                  all_keys = copy.copy(global_dict.keys())
+                  all_keys.sort(key=lambda x: len(x), reverse=True)
+                  for x in all_keys:
+                     v = v.replace(x, str(global_dict[x]))
 
-      for values in itertools.izip(values):
-         new_entry = values[0]
-         # Verify the parsed tokens
-         if (new_entry['Define'] == '#define') and \
-            (new_entry['Key'] != None) and \
-            (new_entry['Value'] != None):
+               # Attempt to evaluate value
+               try:
+                  v = eval(v)
+               # Catch exceptions and do not evaluate
+               except:
+                  pass
 
-            new_key   = new_entry['Key'].strip()
-            new_value = new_entry['Value'].strip()
-
-            # If value pair is empty string, assume feature definition is true
-            if new_value == '':
-               new_value = 'yes'
-
-            # Check for and handle text replacements as we parse
-            if global_dict is not None and len(global_dict.keys()) > 0:
-               for key in global_dict:
-                  new_value = new_value.replace(key, str(global_dict.get(key)))
-
-            # Attempt to evaluate value
-            try:
-               new_value = eval(new_value)
-            # Catch exceptions and do not evaluate
-            except:
-               pass
-
-            # Add to global dictionary
-            global_dict[new_key] = new_value
-      instream.close()
+               global_dict[k] = v
 
    return global_dict
 
@@ -2224,25 +2219,25 @@
         #generate new file for appending target data + required MCs
         file = open(target, "ab")
 
+        filedata_till_128kb = filedata[0:VIRTUAL_BLOCK_SIZE]
+        filedata_after_128kb = filedata[VIRTUAL_BLOCK_SIZE:length]
+
+        a = str(hex(FLASH_CODE_WORD))
+        mc1 = chr(int(a[8:10],16)) + chr(int(a[6:8],16)) + chr(int(a[4:6],16)) + chr(int(a[2:4],16))
+
+        b = str(hex(MAGIC_NUM))
+        mc2 = chr(int(b[8:10],16)) + chr(int(b[6:8],16)) + chr(int(b[4:6],16)) + chr(int(b[2:4],16))
+
+        c = str(hex(SBL_VIRTUAL_BLOCK_MAGIC_NUM))
+        mc3 = chr(int(c[8:10],16)) + chr(int(c[6:8],16)) + chr(int(c[4:6],16)) + chr(int(c[2:4],16))
+
         while length > VIRTUAL_BLOCK_SIZE:
+            file.write(filedata_till_128kb)
+            filedata = mc1 + mc2 + mc3 + filedata_after_128kb
+            length = len(filedata)
             filedata_till_128kb = filedata[0:VIRTUAL_BLOCK_SIZE]
             filedata_after_128kb = filedata[VIRTUAL_BLOCK_SIZE:length]
 
-            a = str(hex(FLASH_CODE_WORD))
-            mc1 = chr(int(a[8:10],16)) + chr(int(a[6:8],16)) + chr(int(a[4:6],16)) + chr(int(a[2:4],16))
-
-            b = str(hex(MAGIC_NUM))
-            mc2 = chr(int(b[8:10],16)) + chr(int(b[6:8],16)) + chr(int(b[4:6],16)) + chr(int(b[2:4],16))
-
-            c = str(hex(SBL_VIRTUAL_BLOCK_MAGIC_NUM))
-            mc3 = chr(int(c[8:10],16)) + chr(int(c[6:8],16)) + chr(int(c[4:6],16)) + chr(int(c[2:4],16))
-
-            MC_inserted_data = filedata_till_128kb + mc1 + mc2 + mc3
-            file.write(MC_inserted_data)
-
-            filedata = filedata_after_128kb
-            length = len(filedata)
-
         #copy the leftover data (<128KB) in output file
         if length > 0:
             file.write(filedata)
@@ -2313,3 +2308,4 @@
 #----------------------------------------------------------------------------
 # HELPER FUNCTIONS END
 #----------------------------------------------------------------------------
+
diff --git a/util/ipqheader/mbncat.py b/util/qualcomm/mbncat.py
similarity index 100%
rename from util/ipqheader/mbncat.py
rename to util/qualcomm/mbncat.py
diff --git a/util/qualcomm/qgpt.py b/util/qualcomm/qgpt.py
new file mode 100755
index 0000000..596ec54
--- /dev/null
+++ b/util/qualcomm/qgpt.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+#============================================================================
+#
+#/** @file qgpt.py
+#
+# GENERAL DESCRIPTION
+#   Generates QCom GPT header for wrapping Bootblock
+#
+# Copyright (c) 2018, The Linux Foundation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of The Linux Foundation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#**/
+#
+
+import os
+import math
+import random
+import re
+import struct
+import sys
+import tempfile
+
+from binascii import crc32
+from optparse import OptionParser
+from types import *
+
+
+def UpdateMBR(options, GPTBlobBuffer):
+    i = 0x1BE
+    GPTBlobBuffer[i + 0] = 0x00                  # not bootable
+    GPTBlobBuffer[i + 1] = 0x00                  # head
+    GPTBlobBuffer[i + 2] = 0x01                  # sector
+    GPTBlobBuffer[i + 3] = 0x00                  # cylinder
+    GPTBlobBuffer[i + 4] = 0xEE                  # type
+    GPTBlobBuffer[i + 5] = 0xFF                  # head
+    GPTBlobBuffer[i + 6] = 0xFF                  # sector
+    GPTBlobBuffer[i + 7] = 0xFF                  # cylinder
+    GPTBlobBuffer[i + 8:i + 8 + 4] = [0x01, 0x00, 0x00, 0x00]
+
+    GPTBlobBuffer[i + 12:i + 16] = [0x00, 0x0f, 0x00, 0x00]
+
+    # magic byte for MBR partitioning - always at this location regardless of
+    # options.sector
+    GPTBlobBuffer[510:512] = [0x55, 0xAA]
+    return i
+
+
+def UpdatePartitionEntry(options, GPTBlobBuffer):
+
+    i = 2 * options.sector_size
+    # GUID of Boot Block
+    GPTBlobBuffer[i:i + 16] = [0x2c, 0xba, 0xa0, 0xde, 0xdd, 0xcb, 0x05, 0x48,
+        0xb4, 0xf9, 0xf4, 0x28, 0x25, 0x1c, 0x3e, 0x98]
+    i += 16
+
+    #This is to set Unique Partition GUID. Below Hex Value is : 00ChezaBootblock00
+    UniquePartitionGUID = 0x6b636f6c62746f6f42617a65684300
+
+    for b in range(16):
+        GPTBlobBuffer[i] = ((UniquePartitionGUID >> (b * 8)) & 0xFF)
+        i += 1
+
+    # LBA of BootBlock Start Content
+    GPTBlobBuffer[i:i + 8] = [0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # End LBA of BootBlock Content
+    GPTBlobBuffer[i] = options.end_lba & 0xFF
+    GPTBlobBuffer[i+1] = (options.end_lba>>8) & 0xFF
+    GPTBlobBuffer[i+2] = (options.end_lba>>16) & 0xFF
+    GPTBlobBuffer[i+3] = (options.end_lba>>24) & 0xFF
+    GPTBlobBuffer[i+4] = (options.end_lba>>32) & 0xFF
+    GPTBlobBuffer[i+5] = (options.end_lba>>40) & 0xFF
+    GPTBlobBuffer[i+6] = (options.end_lba>>48) & 0xFF
+    GPTBlobBuffer[i+7] = (options.end_lba>>56) & 0xFF
+    i += 8
+
+    # Attributes
+    GPTBlobBuffer[i:i + 8] = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # Lable
+    GPTBlobBuffer[i:i + 17] = [0x62, 0x00, 0x6f, 0x00, 0x6f, 0x00, 0x74, 0x00,
+        0x62, 0x00, 0x6c, 0x00, 0x6f, 0x00, 0x63, 0x00, 0x6b]
+
+    return i
+
+def UpdateGPTHeader(options, GPTBlobBuffer):
+
+    i = options.sector_size
+    # Signature and Revision and HeaderSize i.e. "EFI PART" and 00 00 01 00
+    # and 5C 00 00 00
+    GPTBlobBuffer[i:i + 16] = [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54,
+        0x00, 0x00, 0x01, 0x00, 0x5C, 0x00, 0x00, 0x00]
+    i += 16
+
+    # CRC is zeroed out till calculated later
+    GPTBlobBuffer[i:i + 4] = [0x00, 0x00, 0x00, 0x00]
+    i += 4
+
+    # Reserved, set to 0
+    GPTBlobBuffer[i:i + 4] = [0x00, 0x00, 0x00, 0x00]
+    i += 4
+
+    # Current LBA
+    GPTBlobBuffer[i:i + 8] = [0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # Backup LBA, No Backup Gpt Used
+    GPTBlobBuffer[i:i + 8] = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # First Usuable LBA (qc_sec + bootblock location)
+    GPTBlobBuffer[i:i + 8] = [0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # Last Usuable LBA (qc_sec + bootblock end location)
+    GPTBlobBuffer[i] = options.end_lba & 0xFF
+    GPTBlobBuffer[i+1] = (options.end_lba>>8) & 0xFF
+    GPTBlobBuffer[i+2] = (options.end_lba>>16) & 0xFF
+    GPTBlobBuffer[i+3] = (options.end_lba>>24) & 0xFF
+    GPTBlobBuffer[i+4] = (options.end_lba>>32) & 0xFF
+    GPTBlobBuffer[i+5] = (options.end_lba>>40) & 0xFF
+    GPTBlobBuffer[i+6] = (options.end_lba>>48) & 0xFF
+    GPTBlobBuffer[i+7] = (options.end_lba>>56) & 0xFF
+    i += 8
+
+    # GUID
+    GPTBlobBuffer[i:i + 16] = [0x32,0x1B,0x10,0x98,0xE2,0xBB,0xF2,0x4B,
+        0xA0,0x6E,0x2B,0xB3,0x3D,0x00,0x0C,0x20]
+    i += 16
+
+    # Partition Table Entry LBA
+    GPTBlobBuffer[i:i + 8] = [0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+    i += 8
+
+    # Number of Partition Entries
+    GPTBlobBuffer[i:i + 4] = [0x01, 0x00, 0x00, 0x00]
+    i += 4
+
+    # Size of One Partition Entry
+    GPTBlobBuffer[i:i + 4] = [0x80, 0x00, 0x00, 0x00]
+    i += 4
+
+    # CRC of Partition Entry
+
+    PartEntry = GPTBlobBuffer[options.sector_size*2:options.sector_size*2 + 128]
+    CalcEntryCRC = crc32(''.join(struct.pack("B", x) for x in PartEntry))
+
+    GPTBlobBuffer[i] = CalcEntryCRC & 0xFF
+    GPTBlobBuffer[i+1] = (CalcEntryCRC>>8) & 0xFF
+    GPTBlobBuffer[i+2] = (CalcEntryCRC>>16) & 0xFF
+    GPTBlobBuffer[i+3] = (CalcEntryCRC>>24) & 0xFF
+    i += 4
+
+    # CRC of Partition Table Header
+    GPTHeader = GPTBlobBuffer[options.sector_size:options.sector_size + 92]
+    CalcEntryCRC = crc32(''.join(struct.pack("B", x) for x in GPTHeader))
+    i = options.sector_size + 16
+
+    GPTBlobBuffer[i] = CalcEntryCRC & 0xFF
+    GPTBlobBuffer[i+1] = (CalcEntryCRC>>8) & 0xFF
+    GPTBlobBuffer[i+2] = (CalcEntryCRC>>16) & 0xFF
+    GPTBlobBuffer[i+3] = (CalcEntryCRC>>24) & 0xFF
+
+    return i
+
+
+def openfile(name, perm):
+    try:
+        f = open(name, perm)
+    except:
+        print("could not open path {0}".format(name))
+        print("Do you have read permissions on the path?")
+        sys.exit(1)
+    return f
+
+
+if __name__ == '__main__':
+    usage = 'usage: %prog [OPTIONS] INFILE OUTFILE\n\n' + \
+            'Packages IMAGE in a GPT format.'
+    parser = OptionParser(usage)
+    parser.add_option('-s', type="int", dest='sector_size', default=4096,
+                      help='Sector size in bytes [Default:4096(4KB)]',
+                      metavar='SIZE')
+
+    (options, args) = parser.parse_args()
+    if len(args) != 2:
+        print("Invalid arguments! Exiting...\n")
+        parser.print_help()
+        sys.exit(1)
+
+    if options.sector_size != 4096 and options.sector_size != 512:
+        print("Invalid Sector Size")
+        sys.exit(1)
+
+    options.inputfile = args[0]
+    options.outputfile = args[1]
+
+    try:
+        statinfo = os.stat(options.inputfile)
+    except OSError as err:
+        print('OS Error: {0}'.format(err))
+        sys.exit(1)
+
+    # 3(MBR+GPT+PART_ENTRY) + 1(if the input file size is not perfect division of sector size)
+    options.end_lba = (statinfo.st_size/options.sector_size)+4
+
+
+    GPTBlobBuffer = [0] * (options.sector_size*3) #Size of MBR+GPT+PART_ENTRY
+
+    UpdateMBR(options, GPTBlobBuffer)
+
+    UpdatePartitionEntry(options, GPTBlobBuffer)
+
+    UpdateGPTHeader(options, GPTBlobBuffer)
+
+    fin = openfile(options.inputfile, 'r+')
+    fout = openfile(options.outputfile, 'wb')
+    for b in GPTBlobBuffer:
+        fout.write(struct.pack("B", b))
+    bb_buffer = fin.read(statinfo.st_size)
+    fout.write(bb_buffer)
+    fout.close()
+    fin.close()
diff --git a/util/qualcomm/scripts/cmm/debug_cb_405.cmm b/util/qualcomm/scripts/cmm/debug_cb_405.cmm
old mode 100644
new mode 100755
diff --git a/util/qualcomm/scripts/cmm/debug_cb_845.cmm b/util/qualcomm/scripts/cmm/debug_cb_845.cmm
old mode 100644
new mode 100755
diff --git a/util/qualcomm/scripts/cmm/debug_cb_common.cmm b/util/qualcomm/scripts/cmm/debug_cb_common.cmm
old mode 100644
new mode 100755
diff --git a/util/qualcomm/scripts/cmm/pbl32_to_bootblock64_jump.cmm b/util/qualcomm/scripts/cmm/pbl32_to_bootblock64_jump.cmm
old mode 100644
new mode 100755

-- 
To view, visit https://review.coreboot.org/c/coreboot/+/29973
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings

Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: Ief4d92214cdc7ec06e90b0c7e73c11b6d6deddb9
Gerrit-Change-Number: 29973
Gerrit-PatchSet: 1
Gerrit-Owner: nsekar at codeaurora.org
Gerrit-Reviewer: Julius Werner <jwerner at chromium.org>
Gerrit-Reviewer: Martin Roth <martinroth at google.com>
Gerrit-Reviewer: Patrick Georgi <pgeorgi at google.com>
Gerrit-Reviewer: nsekar at codeaurora.org
Gerrit-MessageType: newchange