diff --git a/scripts/.gitignore b/scripts/.gitignore
new file mode 100644
index 000000000..567609b12
--- /dev/null
+++ b/scripts/.gitignore
@@ -0,0 +1 @@
+build/
diff --git a/scripts/base_image_util.sh b/scripts/base_image_util.sh
new file mode 100755
index 000000000..e6c87e5a9
--- /dev/null
+++ b/scripts/base_image_util.sh
@@ -0,0 +1,286 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+. "toolchain_utils.sh" || exit 1
+. "common.sh" || exit 1
+CHROMEOS_MASTER_CONFIG_FILE="${BOARD_ROOT}/usr/share/chromeos-config/config.dtb"
+BUILD_DIR="build"
+BUILD_LIBRARY_DIR="."
+SCRIPTS_DIR="."
+. "${BUILD_LIBRARY_DIR}/disk_layout_util.sh" || exit 1
+. "${BUILD_LIBRARY_DIR}/mount_gpt_util.sh" || exit 1
+. "${BUILD_LIBRARY_DIR}/build_image_util.sh" || exit 1
+
+check_full_disk() {
+  local prev_ret=$?
+
+  # Disable die on error.
+  set +e
+
+  # See if we ran out of space. Only show if we errored out via a trap.
+  if [[ ${prev_ret} -ne 0 ]]; then
+    local df=$(df -B 1M "${root_fs_dir}")
+    if [[ ${df} == *100%* ]]; then
+      error "Here are the biggest [partially-]extracted files (by disk usage):"
+      # Send final output to stderr to match `error` behavior.
+      sudo find "${root_fs_dir}" -xdev -type f -printf '%b %P\n' | \
+        awk '$1 > 16 { $1 = $1 * 512; print }' | sort -n | tail -100 1>&2
+      error "Target image has run out of space:"
+      error "${df}"
+    fi
+  fi
+
+  # Turn die on error back on.
+  set -e
+}
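Note that check_full_disk is written to run from an error trap: the first thing it samples is `$?` from the failing command. A minimal sketch of how a caller could wire it up; the cleanup function name is illustrative and the unmount_image call is assumed to come from mount_gpt_util.sh, neither is part of this patch:

```bash
# Illustrative only: run the disk-full diagnostics whenever a command in the
# critical section dies, then drop the trap once the work has succeeded.
cleanup_on_error() {
  check_full_disk   # reads $? from the command that triggered the trap
  unmount_image     # assumed helper sourced from mount_gpt_util.sh
}

trap cleanup_on_error EXIT
# ... populate "${root_fs_dir}" here; any die/failure lands in the trap ...
trap - EXIT
```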
+
+zero_free_space() {
+  local fs_mount_point=$1
+
+  if ! mountpoint -q "${fs_mount_point}"; then
+    info "Not zeroing freespace in ${fs_mount_point} since it isn't a mounted" \
+      "filesystem. This is normal for squashfs and ubifs partitions."
+    return 0
+  fi
+
+  info "Zeroing freespace in ${fs_mount_point}"
+  # dd is a silly thing and will produce a "No space left on device" message
+  # that cannot be turned off and is confusing to unsuspecting victims.
+  ( sudo dd if=/dev/zero of="${fs_mount_point}/filler" bs=4096 conv=fdatasync \
+      status=noxfer || true ) 2>&1 | grep -v "No space left on device"
+  sudo rm "${fs_mount_point}/filler"
+}
+
+# create_dev_install_lists updates package lists used by
+# chromeos-base/dev-install
+create_dev_install_lists() {
+  local root_fs_dir=$1
+
+  info "Building dev-install package lists"
+
+  local pkgs=(
+    portage
+    virtual/target-os
+    virtual/target-os-dev
+    virtual/target-os-test
+  )
+
+  local pkgs_out=$(mktemp -d)
+
+  for pkg in "${pkgs[@]}" ; do
+    emerge-${BOARD} --color n --pretend --quiet --emptytree \
+      --root-deps=rdeps ${pkg} | \
+      egrep -o ' [[:alnum:]-]+/[^[:space:]/]+\b' | \
+      tr -d ' ' | \
+      sort > "${pkgs_out}/${pkg##*/}.packages"
+    local _pipestatus=${PIPESTATUS[*]}
+    [[ ${_pipestatus// } -eq 0 ]] || error "\`emerge-${BOARD} ${pkg}\` failed"
+  done
+
+  # bootstrap = portage - target-os
+  comm -13 "${pkgs_out}/target-os.packages" \
+    "${pkgs_out}/portage.packages" > "${pkgs_out}/bootstrap.packages"
+
+  # chromeos-base = target-os + portage - virtuals
+  sort -u "${pkgs_out}/target-os.packages" "${pkgs_out}/portage.packages" \
+    | grep -v "virtual/" \
+    > "${pkgs_out}/chromeos-base.packages"
+
+  # package.installable = target-os-dev + target-os-test - target-os + virtuals
+  comm -23 <(cat "${pkgs_out}/target-os-dev.packages" \
+    "${pkgs_out}/target-os-test.packages" | sort) \
+    "${pkgs_out}/target-os.packages" \
+    > "${pkgs_out}/package.installable"
+  grep "virtual/" "${pkgs_out}/target-os.packages" \
+    >> "${pkgs_out}/package.installable"
+
+  # Add dhcp to the list of packages installed since its installation will not
+  # complete (we cannot add the dhcp group since /etc is not writable).
+  # Bootstrap it instead.
+  grep "net-misc/dhcp-" "${pkgs_out}/target-os-dev.packages" \
+    >> "${pkgs_out}/chromeos-base.packages" || true
+  grep "net-misc/dhcp-" "${pkgs_out}/target-os-dev.packages" \
+    >> "${pkgs_out}/bootstrap.packages" || true
+
+  sudo mkdir -p \
+    "${root_fs_dir}/usr/share/dev-install/portage/make.profile/package.provided"
+  sudo cp "${pkgs_out}/bootstrap.packages" \
+    "${root_fs_dir}/usr/share/dev-install/portage"
+  sudo cp "${pkgs_out}/package.installable" \
+    "${root_fs_dir}/usr/share/dev-install/portage/make.profile"
+  sudo cp "${pkgs_out}/chromeos-base.packages" \
+    "${root_fs_dir}/usr/share/dev-install/portage/make.profile/package.provided"
+
+  rm -r "${pkgs_out}"
+}
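The list arithmetic above leans on comm(1), which compares two sorted files column by column: `-13` keeps only lines unique to the second file, `-23` only lines unique to the first. A toy run with stand-in files (paths and package names are examples, not the real lists):

```bash
# Stand-in files for the real package lists; comm requires sorted input.
printf '%s\n' sys-apps/portage sys-libs/glibc | sort > /tmp/portage.packages
printf '%s\n' sys-libs/glibc | sort > /tmp/target-os.packages

# bootstrap = portage - target-os: -1 drops lines unique to the first file,
# -3 drops lines common to both, leaving lines unique to the second file.
comm -13 /tmp/target-os.packages /tmp/portage.packages
# prints: sys-apps/portage
```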
+
+install_libc() {
+  root_fs_dir="$1"
+  # We need to install libc manually from the cross toolchain.
+  # TODO: Improve this? It would be ideal to use emerge to do this.
+  libc_version="$(get_variable "${BOARD_ROOT}/${SYSROOT_SETTINGS_FILE}" \
+    "LIBC_VERSION")"
+  PKGDIR="/var/lib/portage/pkgs"
+  local libc_atom="cross-${CHOST}/glibc-${libc_version}"
+  LIBC_PATH="${PKGDIR}/${libc_atom}.tbz2"
+
+  if [[ ! -e ${LIBC_PATH} ]]; then
+    sudo emerge --nodeps -gf "=${libc_atom}"
+  fi
+
+  # Strip out files we don't need in the final image at runtime.
+  local libc_excludes=(
+    # Compile-time headers.
+    'usr/include' 'sys-include'
+    # Link-time objects.
+    '*.[ao]'
+    # Debug commands not used by normal runtime code.
+    'usr/bin/'{getent,ldd}
+    # LD_PRELOAD objects for debugging.
+    'lib*/lib'{memusage,pcprofile,SegFault}.so 'usr/lib*/audit'
+    # We only use files & dns with nsswitch, so throw away the others.
+    'lib*/libnss_'{compat,db,hesiod,nis,nisplus}'*.so*'
+    # This is only for very old packages which we don't have.
+    'lib*/libBrokenLocale*.so*'
+  )
+  pbzip2 -dc --ignore-trailing-garbage=1 "${LIBC_PATH}" | \
+    sudo tar xpf - -C "${root_fs_dir}" ./usr/${CHOST} \
+      --strip-components=3 "${libc_excludes[@]/#/--exclude=}"
+}
+
+create_base_image() {
+  local image_name=$1
+  local rootfs_verification_enabled=$2
+  local bootcache_enabled=$3
+  local output_dev=$4
+  local image_type="usb"
+
+  BUILD_DIR="build"
+  check_valid_layout "base"
+  check_valid_layout "${image_type}"
+
+  info "Using image type ${image_type}"
+  get_disk_layout_path
+  info "Using disk layout ${DISK_LAYOUT_PATH}"
+
+  mkdir -p "$BUILD_DIR"
+
+  root_fs_dir="${BUILD_DIR}/rootfs"
+  stateful_fs_dir="${BUILD_DIR}/stateful"
+  esp_fs_dir="${BUILD_DIR}/esp"
+
+  mkdir "${root_fs_dir}" "${stateful_fs_dir}" "${esp_fs_dir}"
+
+  info "Building GPT image"
+  build_gpt_image "${output_dev}" "${image_type}"
+
+  info "Mounting GPT image"
+  mount_image "${output_dev}" "${root_fs_dir}" \
+    "${stateful_fs_dir}" "${esp_fs_dir}"
+
+  info "Root filesystem disk usage:"
+  df -h "${root_fs_dir}"
+
+  # Create symlinks so that /usr/local/usr based directories are symlinked to
+  # /usr/local/ directories e.g. /usr/local/usr/bin -> /usr/local/bin, etc.
+
+  # Install the kernel onto partition 2.
+  local kernel_partition="2"
+  sudo dd if=kernels/linux.bin of="${output_dev}${kernel_partition}"
+
+  "${VBOOT_SIGNING_DIR}"/insert_container_publickey.sh \
+    "${root_fs_dir}" \
+    "${VBOOT_DEVKEYS_DIR}"/cros-oci-container-pub.pem
+
+  "${GCLIENT_ROOT}/chromite/bin/cros_set_lsb_release" \
+    --sysroot="${root_fs_dir}" \
+    --board="${BOARD}" \
+    "${model_flags[@]}" \
+    ${builder_path} \
+    --version_string="${CHROMEOS_VERSION_STRING}" \
+    --auserver="${CHROMEOS_VERSION_AUSERVER}" \
+    --devserver="${CHROMEOS_VERSION_DEVSERVER}" \
+    ${official_flag} \
+    --buildbot_build="${BUILDBOT_BUILD:-"N/A"}" \
+    --track="${CHROMEOS_VERSION_TRACK:-"developer-build"}" \
+    --branch_number="${CHROMEOS_BRANCH}" \
+    --build_number="${CHROMEOS_BUILD}" \
+    --chrome_milestone="${CHROME_BRANCH}" \
+    --patch_number="${CHROMEOS_PATCH}" \
+    "${arc_flags[@]}"
+
+  # Set /etc/os-release on the image.
+  # Note: fields in /etc/os-release can come from different places:
+  # * /etc/os-release itself with docrashid
+  # * /etc/os-release.d for fields created with do_osrelease_field
+  sudo "${GCLIENT_ROOT}/chromite/bin/cros_generate_os_release" \
+    --root="${root_fs_dir}" \
+    --version="${CHROME_BRANCH}" \
+    --build_id="${CHROMEOS_VERSION_STRING}"
+
+  # Create the boot.desc file which stores the build-time configuration
+  # information needed for making the image bootable after creation with
+  # cros_make_image_bootable.
+  create_boot_desc "${image_type}"
+
+  # Write out the GPT creation script.
+  # This MUST be done before writing bootloader templates else we'll break
+  # the hash on the root FS.
+  write_partition_script "${image_type}" \
+    "${root_fs_dir}/${PARTITION_SCRIPT_PATH}"
+  sudo chown root:root "${root_fs_dir}/${PARTITION_SCRIPT_PATH}"
+
+  # Populates the root filesystem with legacy bootloader templates
+  # appropriate for the platform. The autoupdater and installer will
+  # use those templates to update the legacy boot partition (12/ESP)
+  # on update.
+  # (This script does not populate vmlinuz.A and .B needed by syslinux.)
+ # Factory install shims may be booted from USB by legacy EFI BIOS, which does + # not support verified boot yet (see create_legacy_bootloader_templates.sh) + # so rootfs verification is disabled if we are building with --factory_install + local enable_rootfs_verification= + if [[ ${rootfs_verification_enabled} -eq ${FLAGS_TRUE} ]]; then + enable_rootfs_verification="--enable_rootfs_verification" + fi + local enable_bootcache= + if [[ ${bootcache_enabled} -eq ${FLAGS_TRUE} ]]; then + enable_bootcache="--enable_bootcache" + fi + + create_legacy_bootloader_templates.sh \ + --arch=${ARCH} \ + --board=${BOARD} \ + --image_type="${image_type}" \ + --to="${root_fs_dir}"/boot \ + --boot_args="${FLAGS_boot_args}" \ + --enable_serial="${FLAGS_enable_serial}" \ + --loglevel="${FLAGS_loglevel}" \ + ${enable_rootfs_verification} \ + ${enable_bootcache} + + + + # Zero rootfs free space to make it more compressible so auto-update + # payloads become smaller + zero_free_space "${root_fs_dir}" + + unmount_image + trap - EXIT + + USE_DEV_KEYS="--use_dev_keys" + + if [[ ${skip_kernelblock_install} -ne 1 ]]; then + # Place flags before positional args. + ${SCRIPTS_DIR}/bin/cros_make_image_bootable "${BUILD_DIR}" \ + ${output_dev} ${USE_DEV_KEYS} --adjust_part="${FLAGS_adjust_part}" + fi +} +create_base_image $1 $2 $3 $4 diff --git a/scripts/bin/cros_get_chrome_version b/scripts/bin/cros_get_chrome_version new file mode 100755 index 000000000..80267c552 --- /dev/null +++ b/scripts/bin/cros_get_chrome_version @@ -0,0 +1,45 @@ +#!/bin/bash + +# Copyright (c) 2010 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Returns the version of Chrome running on a remote machine. + +# --- BEGIN COMMON.SH BOILERPLATE --- +# Load common CrOS utilities. Inside the chroot this file is installed in +# /usr/lib/crosutils. Outside the chroot we find it relative to the script's +# location. +find_common_sh() { + local common_paths=("$(dirname "$(readlink -f "$0")")/.." /usr/lib/crosutils) + local path + + SCRIPT_ROOT="${common_paths[0]}" + for path in "${common_paths[@]}"; do + if [ -r "${path}/common.sh" ]; then + SCRIPT_ROOT="${path}" + break + fi + done +} + +find_common_sh +. "${SCRIPT_ROOT}/common.sh" || exit 1 +# --- END COMMON.SH BOILERPLATE --- + +. "${SCRIPT_ROOT}/remote_access.sh" || exit 1 + +FLAGS "$@" || exit 1 + +switch_to_strict_mode + +# TMP necessary for remote_access_init. +TMP=$(mktemp -d /tmp/cros_check_chrome_version.XXXX) +trap "rm -rf ${TMP}" EXIT + +remote_access_init &> /dev/null + +remote_sh "/opt/google/chrome/chrome --version" +CHROME_VERSION=$(echo ${REMOTE_OUT} | \ + sed 's/.* \([0-9]\+.[0-9]\+.[0-9]\+.[0-9]\+\).*/\1/') +echo "${CHROME_VERSION}" diff --git a/scripts/bin/cros_make_image_bootable b/scripts/bin/cros_make_image_bootable new file mode 100755 index 000000000..aeb5f4b41 --- /dev/null +++ b/scripts/bin/cros_make_image_bootable @@ -0,0 +1,448 @@ +#!/bin/bash +# +# Copyright (c) 2010 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Script which ensures that a given image has an up-to-date +# kernel partition, rootfs integrity hashes, and legacy bootloader configs. + +# --- BEGIN COMMON.SH BOILERPLATE --- +# Load common CrOS utilities. Inside the chroot this file is installed in +# /usr/lib/crosutils. Outside the chroot we find it relative to the script's +# location. 
+find_common_sh() {
+  local common_paths=("$(dirname "$(readlink -f "$0")")/.." /usr/lib/crosutils)
+  local path
+
+  SCRIPT_ROOT="${common_paths[0]}"
+  for path in "${common_paths[@]}"; do
+    if [ -r "${path}/common.sh" ]; then
+      SCRIPT_ROOT="${path}"
+      break
+    fi
+  done
+}
+
+find_common_sh
+. "${SCRIPT_ROOT}/common.sh" || exit 1
+# --- END COMMON.SH BOILERPLATE ---
+
+# Need to be inside the chroot to load chromeos-common.sh.
+
+# Load functions and constants for chromeos-install.
+. chromeos-common.sh || exit 1
+. "build_image_util.sh" || exit 1
+. "disk_layout_util.sh" || exit 1
+. "mount_gpt_util.sh" || exit 1
+
+switch_to_strict_mode
+
+if [ $# -lt 2 ]; then
+  echo "Usage: ${0} /PATH/TO/IMAGE IMAGE.BIN [shflags overrides]"
+  exit 1
+fi
+
+IMAGE_DIR="$(readlink -f "${1}")"
+IMAGE="${2}"
+shift
+shift
+FLAG_OVERRIDES=( "$@" )
+
+if get_boot_desc "${IMAGE_DIR}/boot.desc"; then
+  info "Boot-time configuration for ${IMAGE_DIR}:"
+  for flag in "${boot_desc_flags[@]}"; do
+    info "  ${flag}"
+  done
+else
+  warn "Falling back to command line parsing."
+fi
+
+if [ ! -r "${IMAGE}" ]; then
+  die "${IMAGE} cannot be read!"
+fi
+
+set +e
+
+# Now parse the build settings from ${OUTPUT_DIR}/boot.desc.
+DEFINE_string adjust_part "" \
+  "Adjustments to apply to the partition table"
+DEFINE_string board "${DEFAULT_BOARD}" \
+  "Board we're building for."
+DEFINE_string image_type "base" \
+  "Type of image we're building for (base/factory_install)."
+DEFINE_string output_dir "/tmp" \
+  "Directory to place output in."
+DEFINE_string image "chromiumos_base.img" \
+  "Full path to the chromiumos image to make bootable."
+DEFINE_string arch "x86" \
+  "Architecture to make bootable for: arm, mips, x86, or amd64"
+DEFINE_boolean cleanup_dirs ${FLAGS_TRUE} \
+  "Whether the mount dirs should be removed on completion."
+
+DEFINE_string boot_args "noinitrd" \
+  "Additional boot arguments to pass to the command line"
+
+DEFINE_integer rootfs_size 720 \
+  "rootfs filesystem size in MBs."
+# ceil(0.1 * rootfs_size) is a good minimum.
+DEFINE_integer rootfs_hash_pad 8 \
+  "MBs reserved at the end of the rootfs image."
+
+DEFINE_string rootfs_hash "/tmp/rootfs.hash" \
+  "Path where the rootfs hash should be stored."
+# TODO(taysom): when we turn on boot cache, both verification and
+# bootcache should have their default be FLAGS_TRUE.
+DEFINE_boolean enable_rootfs_verification ${FLAGS_TRUE} \
+  "Default all bootloaders to use kernel-based root fs integrity checking."
+DEFINE_boolean enable_bootcache ${FLAGS_FALSE} \
+  "Default all bootloaders to NOT use bootcache."
+DEFINE_integer verity_error_behavior 3 \
+  "Kernel verified boot error behavior (0: I/O errors, 1: reboot, 2: nothing)"
+DEFINE_integer verity_max_ios -1 \
+  "Number of outstanding I/O operations dm-verity caps at."
+DEFINE_string verity_algorithm "sha1" \
+  "Cryptographic hash algorithm used for kernel vboot."
+DEFINE_string verity_salt "" \
+  "Salt for rootfs hash tree."
+
+DEFINE_string keys_dir "${VBOOT_DEVKEYS_DIR}" \
+  "Directory containing the signing keys."
+
+DEFINE_string rootfs_mountpoint "/tmp/rootfs" \
+  "Path where the rootfs can be safely mounted"
+DEFINE_string statefulfs_mountpoint "/tmp/statefulfs" \
+  "Path where the statefulfs can be safely mounted"
+DEFINE_string espfs_mountpoint "/tmp/espfs" \
+  "Path where the espfs can be safely mounted"
+
+DEFINE_boolean use_dev_keys ${FLAGS_FALSE} \
+  "Use developer keys for signing. (Default: false)"
+
+DEFINE_boolean fsck_rootfs ${FLAGS_FALSE} \
+  "Check integrity of the rootfs on the modified image."
+
+# TODO(pkumar): Remove once known that no images are using this flag.
+DEFINE_boolean crosbug12352_arm_kernel_signing ${FLAGS_FALSE} \
+  "This flag is deprecated but the bots still need to parse old images."
+
+# TODO(sosa): Remove once known images no longer use this in their config.
+DEFINE_string arm_extra_bootargs "" "DEPRECATED FLAG. Do not use."
+
+DEFINE_boolean force_developer_mode ${FLAGS_FALSE} \
+  "Add cros_debug to boot args."
+
+DEFINE_string enable_serial "" \
+  "Enable serial port for printks. Example values: ttyS0"
+DEFINE_integer loglevel 7 \
+  "The loglevel to add to the kernel command line."
+
+# Parse the boot.desc and any overrides.
+set -- "${boot_desc_flags[@]}" "${FLAG_OVERRIDES[@]}"
+FLAGS "${@}" || exit 1
+
+[ -z "${FLAGS_verity_salt}" ] && FLAGS_verity_salt=$(make_salt)
+
+# Only now can we die on error. shflags functions leak non-zero error codes,
+# so we would die prematurely if switch_to_strict_mode were called before now.
+switch_to_strict_mode -u
+
+append_boot_flag() {
+  local file="$1"
+  local pattern="$2"
+  local base_pattern="$3"
+
+  [ -f "${file}" ] || return ${FLAGS_TRUE}
+  grep -wq "${pattern}" "${file}" && return ${FLAGS_TRUE}
+  sudo sed -i "s/\b${base_pattern}\b/& ${pattern}/g" "${file}"
+}
+
+check_kernel_size() {
+  local kernel_image_size="$1"
+  local kernel_part="$2"
+  local kernel_slot="$3"
+  local kernel_partition_size=$(get_partition_size ${FLAGS_image_type} \
+    ${kernel_part})
+  local kernel_partition_size_90=$(( kernel_partition_size * 90 / 100 ))
+  info "Kernel partition ${kernel_slot} size is ${kernel_partition_size} bytes."
+  if [[ ${kernel_image_size} -gt ${kernel_partition_size} ]]; then
+    die "Kernel image won't fit in partition ${kernel_slot}!"
+  elif [[ ${kernel_image_size} -gt ${kernel_partition_size_90} ]]; then
+    warn "Kernel partition ${kernel_slot} is more than 90% full!"
+  fi
+}
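Because the saved boot.desc flags are placed before FLAG_OVERRIDES on the rebuilt argument list, and shflags assigns repeated flags left to right, a flag given on the command line wins over the stored value. A hypothetical invocation (the image path and flag values are examples only):

```bash
# boot.desc may have recorded --enable_rootfs_verification; the later
# occurrence of the flag on the command line replaces the stored setting.
./bin/cros_make_image_bootable build/images/myboard/latest \
  chromiumos_base.img --noenable_rootfs_verification --loglevel=4
```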
+build_img() {
+  local image_name="$1"
+  local root_dev="$2"
+  local root_dev_size="$3"
+  local keyblock="$4"
+  local private="$5"
+  local public="$6"
+  local vblock=${7:-""}
+
+  # Default to non-verified.
+  local enable_rootfs_verification_flag=--noenable_rootfs_verification
+  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
+    enable_rootfs_verification_flag=--enable_rootfs_verification
+  fi
+  local enable_bootcache_flag=--noenable_bootcache
+  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
+    enable_bootcache_flag=--enable_bootcache
+  fi
+
+  if [[ -n "${vblock}" ]]; then
+    vblock=--hd_vblock="${FLAGS_output_dir}/${vblock}"
+  fi
+
+  ./build_kernel_image.sh \
+    --board="${FLAGS_board}" \
+    --arch="${FLAGS_arch}" \
+    --to="${FLAGS_output_dir}/${image_name}" \
+    --vmlinuz="${FLAGS_rootfs_mountpoint}/boot/vmlinuz" \
+    --working_dir="${FLAGS_output_dir}" \
+    --boot_args="${FLAGS_boot_args}" \
+    --keep_work \
+    --rootfs_image=${root_dev} \
+    --rootfs_image_size=${root_dev_size} \
+    --rootfs_hash=${FLAGS_rootfs_hash} \
+    --verity_hash_alg=${FLAGS_verity_algorithm} \
+    --verity_max_ios=${FLAGS_verity_max_ios} \
+    --verity_error_behavior=${FLAGS_verity_error_behavior} \
+    --verity_salt=${FLAGS_verity_salt} \
+    --keys_dir="${FLAGS_keys_dir}" \
+    --keyblock="${keyblock}" \
+    --private="${private}" \
+    --public="${public}" \
+    --enable_serial="${FLAGS_enable_serial}" \
+    ${vblock} \
+    ${enable_rootfs_verification_flag} \
+    ${enable_bootcache_flag}
+}
+
+make_image_bootable() {
+  local image="$1"
+
+  # Update legacy boot config templates (in rootfs) before rootfs is locked.
+  # This is required because postinst will copy new legacy boot configurations
+  # from the rootfs partition instead of modifying existing entries in the EFI
+  # partition.
+  if [ ${FLAGS_force_developer_mode} -eq ${FLAGS_TRUE} ]; then
+    trap "unmount_image ; die 'cros_make_image_bootable failed.'" EXIT
+    mount_image "${image}" "${FLAGS_rootfs_mountpoint}" \
+      "${FLAGS_statefulfs_mountpoint}" "" ""
+
+    append_boot_flag "${FLAGS_rootfs_mountpoint}/boot/syslinux/root.A.cfg" \
+      "cros_debug" "cros_legacy"
+    append_boot_flag "${FLAGS_rootfs_mountpoint}/boot/syslinux/root.B.cfg" \
+      "cros_debug" "cros_legacy"
+    append_boot_flag "${FLAGS_rootfs_mountpoint}/boot/syslinux/usb.A.cfg" \
+      "cros_debug" "cros_legacy"
+    append_boot_flag "${FLAGS_rootfs_mountpoint}/boot/efi/boot/grub.cfg" \
+      "cros_debug" "cros_efi"
+
+    unmount_image
+    trap - EXIT
+  fi
+
+  # Make the filesystem un-mountable as read-write.
+  # mount_gpt_image.sh will undo this as needed.
+  # TODO(wad) make sure there is parity in the signing scripts.
+  if [ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]; then
+    # TODO(wad) this would be a good place to reset any other ext2 metadata.
+    warn "Disabling r/w mount of the root filesystem"
+    local rootfs_offset="$(partoffset ${image} 3)"
+    disable_rw_mount "${image}" "$(( rootfs_offset * 512 ))"
+  fi
+
+  trap "unmount_image ; die 'cros_make_image_bootable failed.'" EXIT
+  mount_image "${image}" "${FLAGS_rootfs_mountpoint}" \
+    "${FLAGS_statefulfs_mountpoint}" "" "--safe"
+
+  # Newer `mount` will decode the filename backing the loop device,
+  # so we need to dig deeper and find the answer ourselves.
+  root_dev=$(awk -v mnt="${FLAGS_rootfs_mountpoint}" \
+    '$2 == mnt { print $1 }' /proc/mounts)
+  if [[ -z "${root_dev}" ]]; then
+    # If the read-only rootfs were not mounted via the kernel using a real
+    # block device, we would need to create one here. So far, all the
+    # filesystems we use in the rootfs can be mounted read-only by the kernel
+    # (including squashfs), so we just bail out if that's the case.
+    die "Didn't find the rootfs block device after mounting it."
+  fi
+
+  # We sign the image with the recovery_key, because this is what goes onto the
+  # USB key. We can only boot from the USB drive in recovery mode.
+  # For the dev install shim, we need to use the installer keyblock instead of
+  # the recovery keyblock because of the difference in flags.
+  local keyblock
+  if [ ${FLAGS_use_dev_keys} -eq ${FLAGS_TRUE} ]; then
+    keyblock=installer_kernel.keyblock
+    info "Using the dev install keyblock."
+  else
+    keyblock=recovery_kernel.keyblock
+    info "Using the recovery keyblock."
+  fi
+
+  if [ ${FLAGS_force_developer_mode} -eq ${FLAGS_TRUE} ]; then
+    FLAGS_boot_args="${FLAGS_boot_args} cros_debug"
+  fi
+
+  # Builds the kernel partition image.
+  local partition_num_root_a="$(get_layout_partition_number \
+    "${FLAGS_image_type}" ROOT-A)"
+  local rootfs_fs_size=$(get_filesystem_size "${FLAGS_image_type}" \
+    "${partition_num_root_a}")
+  #build_img "vmlinuz.image" "${root_dev}" "${rootfs_fs_size}" "${keyblock}" \
+  #  "recovery_kernel_data_key.vbprivk" "recovery_key.vbpubk"
+  #build_img "hd_vmlinuz.image" "${root_dev}" "${rootfs_fs_size}" \
+  #  "kernel.keyblock" "kernel_data_key.vbprivk" "kernel_subkey.vbpubk" \
+  #  "vmlinuz_hd.vblock"
+
+  # Check the size of the kernel image and issue a warning when the image size
+  # is near the limit. The same kernel binary is installed into both slots,
+  # so the A and B sizes are identical here.
+  local kernel_image_size_A=$(stat -c '%s' kernels/linux.bin)
+  info "Kernel image A size is ${kernel_image_size_A} bytes."
+  local kernel_image_size_B=$(stat -c '%s' kernels/linux.bin)
+  info "Kernel image B size is ${kernel_image_size_B} bytes."
+  local partition_num_kern_a="$(get_layout_partition_number \
+    "${FLAGS_image_type}" KERN-A)"
+  check_kernel_size ${kernel_image_size_A} ${partition_num_kern_a} A
+  local partition_num_kern_b="$(get_layout_partition_number \
+    "${FLAGS_image_type}" KERN-B)"
+  check_kernel_size ${kernel_image_size_B} ${partition_num_kern_b} B
+
+#  local rootfs_hash_size=$(stat -c '%s' ${FLAGS_rootfs_hash})
+#  local rootfs_partition_size=$(get_partition_size ${FLAGS_image_type} \
+#    ${partition_num_root_a})
+#  local rootfs_hash_pad=$(( rootfs_partition_size - rootfs_fs_size ))
+#  info "Appending rootfs.hash (${rootfs_hash_size} bytes) to the root fs"
+#  if [[ ${rootfs_hash_size} -gt ${rootfs_hash_pad} ]]
+#  then
+#    die "rootfs_partition_size - rootfs_fs_size is less than the needed " \
+#      "rootfs_hash_size (${rootfs_hash_size}), update your disk layout " \
+#      "configuration"
+#  fi
+#  # Unfortunately, mount_gpt_image uses mount and not losetup to create the
+#  # loop devices. This means that they are not the correct size. We have to
+#  # write directly to the image to append the hash tree data.
+#  local hash_offset="$(partoffset ${image} ${partition_num_root_a})"
+#  hash_offset=$((hash_offset + (${rootfs_fs_size} / 512)))
+#  sudo dd bs=512 \
+#    seek=${hash_offset} \
+#    if="${FLAGS_rootfs_hash}" \
+#    of="${image}" \
+#    conv=notrunc \
+#    status=none
+#
+#  # Move the verification block needed for the hard disk install to the
+#  # stateful partition. Mount stateful fs, copy file, and umount fs.
+#  # In original CL: http://codereview.chromium.org/2868044, this was done in
+#  # create_base_image().
However, it could break the build if it is a clean +# # build because vmlinuz_hd.vblock hasn't been created by build_kernel_image.sh +# # In some builds that don't use vboot to verify the kernel, this file might +# # not get created as part of the build, so only copy them if they were. +# if [ -f "${FLAGS_output_dir}/vmlinuz_hd.vblock" ]; then +# sudo cp "${FLAGS_output_dir}/vmlinuz_hd.vblock" \ +# "${FLAGS_statefulfs_mountpoint}" +# fi + + # Install the kernel to both slots A and B so there will always be a regular + # kernel in slot B on recovery and non-recovery images. + local koffset="$(partoffset ${image} ${partition_num_kern_a})" + sudo dd if="kernels/linux.bin" of="${image}" \ + conv=notrunc bs=512 seek=${koffset} status=none + koffset="$(partoffset ${image} ${partition_num_kern_b})" + sudo dd if="kernels/linux.bin" of="${image}" \ + conv=notrunc bs=512 seek=${koffset} status=none + + # Update the bootloaders. The EFI system partition will be updated. +# local kernel_part= +# +# # We should update the esp in place in the image. +# local bootloader_to="${image}" +# local partition_num_efi_system="$(get_layout_partition_number \ +# "${FLAGS_image_type}" EFI-SYSTEM)" +# local esp_offset="$(partoffset ${image} ${partition_num_efi_system})" +# esp_offset=$((esp_offset * 512)) # sectors to bytes +# local esp_size="$(partsize ${image} ${partition_num_efi_system})" +# esp_size=$((esp_size * 512)) # sectors to bytes +# local bootloader_to_flags="--to_offset=${esp_offset} --to_size=${esp_size}" +# +# if [[ "${FLAGS_arch}" = "x86" || "${FLAGS_arch}" = "amd64" ]]; then +# # Use the kernel partition to acquire configuration flags. +# kernel_part="--kernel_partition='${FLAGS_output_dir}/vmlinuz.image'" +# # Install syslinux on the EFI System Partition. +# kernel_part="${kernel_part} --install_syslinux" +# elif [[ "${FLAGS_arch}" = "arm" || "${FLAGS_arch}" = "mips" ]]; then +# # These flags are not used for ARM / MIPS update_bootloaders.sh +# kernel_part="" +# fi +# +# if [[ ${esp_size} -gt 0 ]]; then +# # Update EFI partition +# ./update_bootloaders.sh \ +# --arch="${FLAGS_arch}" \ +# --image_type="${FLAGS_image_type}" \ +# --to="${bootloader_to}" \ +# --from="${FLAGS_rootfs_mountpoint}"/boot \ +# --vmlinuz="${FLAGS_rootfs_mountpoint}"/boot/vmlinuz \ +# ${bootloader_to_flags} \ +# ${kernel_part} +# fi +# +# # We don't need to keep these files around anymore. +# sudo rm -f "${FLAGS_rootfs_hash}" "${FLAGS_output_dir}/vmlinuz.image" \ +# "${FLAGS_output_dir}/hd_vmlinuz.image" \ +# "${FLAGS_output_dir}/vmlinuz_hd.vblock" +# + unmount_image + trap - EXIT +} + +verify_image_rootfs() { + local image=$1 + local partition_num_root_a="$(get_layout_partition_number \ + "${FLAGS_image_type}" ROOT-A)" + local rootfs_offset="$(partoffset ${image} ${partition_num_root_a})" + + local rootfs_tmp_file=$(mktemp) + trap "rm ${rootfs_tmp_file}" EXIT + sudo dd if="${image}" of="${rootfs_tmp_file}" bs=512 skip="${rootfs_offset}" \ + status=none + + # This flips the read-only compatibility flag, so that + # e2fsck does not complain about unknown file system capabilities. + enable_rw_mount "${rootfs_tmp_file}" + info "Running e2fsck to check root file system for errors" + sudo e2fsck -fn "${rootfs_tmp_file}" || + die "Root file system has errors, please ensure boot.desc and/or \ +command line parameters are correct" +} + +# Store output and temporary files next to image. 
+FLAGS_output_dir="${IMAGE_DIR}" +FLAGS_rootfs_hash="${IMAGE_DIR}/rootfs.hash" +FLAGS_rootfs_mountpoint="${IMAGE_DIR}/rootfs_dir" +FLAGS_statefulfs_mountpoint="${IMAGE_DIR}/stateful_dir" +FLAGS_espfs_mountpoint="${IMAGE_DIR}/esp" + +# Create the directories if they don't exist. +mkdir -p ${FLAGS_rootfs_mountpoint} +mkdir -p ${FLAGS_statefulfs_mountpoint} +mkdir -p ${FLAGS_espfs_mountpoint} + +make_image_bootable "${IMAGE}" +if type -p board_make_image_bootable; then + board_make_image_bootable "${IMAGE}" +fi +if [ ${FLAGS_fsck_rootfs} -eq ${FLAGS_TRUE} ]; then + verify_image_rootfs "${IMAGE}" +fi + +if [ ${FLAGS_cleanup_dirs} -eq ${FLAGS_TRUE} ]; then + rmdir ${FLAGS_rootfs_mountpoint} + rmdir ${FLAGS_statefulfs_mountpoint} + rmdir ${FLAGS_espfs_mountpoint} +fi diff --git a/scripts/bin/cros_start_vm b/scripts/bin/cros_start_vm new file mode 100755 index 000000000..ffb9f59ae --- /dev/null +++ b/scripts/bin/cros_start_vm @@ -0,0 +1,36 @@ +#!/bin/bash + +# Copyright (c) 2010 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Simple wrapper script to start a vm using the vm lib. +. "$(dirname "$0")/../common.sh" || exit 1 + +. "${SCRIPTS_DIR}/lib/cros_vm_lib.sh" || die "Unable to load cros_vm_lib.sh" +. "${SCRIPTS_DIR}/lib/cros_vm_constants.sh" || \ + die "Unable to load cros_vm_constants.sh" + +DEFINE_string board "${DEFAULT_BOARD}" \ + "Board for VM image (unnecessary if path given)" +DEFINE_string image_path "" "Full path of the VM image" + +set -e + +# Parse command line. +FLAGS "$@" || exit 1 +eval set -- "${FLAGS_ARGV}" + +# Use latest if not specified. +if [ -z "${FLAGS_image_path}" ]; then + LATEST_IMAGE="$(${SCRIPTS_DIR}/get_latest_image.sh \ + --board=${FLAGS_board})/${DEFAULT_QEMU_IMAGE}" + info "Using latest vm image ${LATEST_IMAGE}" + FLAGS_image_path=${LATEST_IMAGE} +fi + +[ -e "${FLAGS_image_path}" ] || die "Image ${FLAGS_image_path} does not exist." + +start_kvm "${FLAGS_image_path}" "${FLAGS_board}" + +echo "ssh root@localhost -p ${FLAGS_ssh_port} -o StrictHostKeyChecking=no" diff --git a/scripts/bin/cros_stop_vm b/scripts/bin/cros_stop_vm new file mode 100755 index 000000000..7811bee50 --- /dev/null +++ b/scripts/bin/cros_stop_vm @@ -0,0 +1,24 @@ +#!/bin/bash + +# Copyright (c) 2010 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Simple wrapper script to stop a vm specified from a pid file. +. "$(dirname "$0")/../common.sh" || exit 1 + +. "${SCRIPTS_DIR}/lib/cros_vm_lib.sh" || die "Unable to load cros_vm_lib.sh" + +set -e + +# Parse command line. +FLAGS "$@" || exit 1 +eval set -- "${FLAGS_ARGV}" + +# Requires pid file to be set. +if [ -z "${FLAGS_kvm_pid}" ]; then + die "Must specify file with pid of kvm to kill." +fi + +KVM_PID_FILE="${FLAGS_kvm_pid}" +stop_kvm diff --git a/scripts/bin/proxy-gw b/scripts/bin/proxy-gw new file mode 100755 index 000000000..19fe3da6b --- /dev/null +++ b/scripts/bin/proxy-gw @@ -0,0 +1,63 @@ +#!/bin/bash +# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# $1 = hostname, $2 = port +# +# Use socat to connect to the specified host and port via one of the proxies +# defined in the environment, if the target host does not appear in the +# no_proxy environment variable. 
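Since proxy-gw takes a host and port on the command line and relays stdio through socat, one plausible way to wire it up is as an ssh ProxyCommand; the proxy host, domains, and target below are examples only:

```bash
# Hypothetical usage: let the script decide, per host, whether to go
# through the configured proxy or connect directly via no_proxy.
export https_proxy="http://proxy.example.com:3128"
export no_proxy="localhost,.corp.example.com"

# ssh substitutes %h and %p with the destination host and port, which
# proxy-gw receives as $1 and $2.
ssh -o ProxyCommand="/path/to/proxy-gw %h %p" user@build-host.example.org
```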
+ +DEST_HOST="$1" +DEST_PORT="$2" + +# Determine whether the destination host is in the "no_proxy" list. +use_proxy="true" +GLOBIGNORE="*" +for a_host in ${no_proxy//,/ } ; do + case "${a_host}" in + "*") # A "*" matches all hosts. + use_proxy="false" + break + ;; + .*) # Items of the form ".some.fqdn" imply match-at-end. + if [[ "${DEST_HOST}" == *"${a_host}" ]]; then + use_proxy="false" + break + fi + ;; + ${DEST_HOST}) # Items of the form "some.fqdn" imply exact-match. + use_proxy="false" + break + ;; + esac +done + +if [[ -n "${all_proxy}" ]]; then + PROXY="${all_proxy}" + TYPE=SOCKS4 + PORT_ATTR=socksport +elif [[ -n "${https_proxy}" ]]; then + PROXY="${https_proxy}" + TYPE=PROXY + PORT_ATTR=proxyport +elif [[ -n "${http_proxy}" ]]; then + PROXY="${http_proxy}" + TYPE=PROXY + PORT_ATTR=proxyport +else + use_proxy="false" +fi + +if [[ "${use_proxy}" == "true" ]]; then + PROXY="${PROXY#*://}" + PROXY="${PROXY%%/*}" + PROXY_HOST="${PROXY%%:*}" + PROXY_PORT="${PROXY##*:}" + PARMS="${PROXY_HOST}:${DEST_HOST}:${DEST_PORT},${PORT_ATTR}=${PROXY_PORT}" + socat_args=( "${TYPE}:${PARMS}" ) +else + socat_args=( TCP:"${DEST_HOST}":"${DEST_PORT}" ) +fi +exec socat STDIO "${socat_args[@]}" diff --git a/scripts/board_options.sh b/scripts/board_options.sh new file mode 100755 index 000000000..50f6243fa --- /dev/null +++ b/scripts/board_options.sh @@ -0,0 +1,25 @@ +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +if [[ -z "${FLAGS_board}" ]]; then + if [[ -z "${SRC_IMAGE}" ]]; then + die "--board or SRC_IMAGE are required." + fi + + # If --board is not set, get the board name from the image. + FLAGS_board="$( + . "${BUILD_LIBRARY_DIR}/mount_gpt_util.sh" + get_board_from_image "${SRC_IMAGE}" + )" +fi + +BOARD="${FLAGS_board}" +BOARD_ROOT="/build/${BOARD}" + +if [[ ! -d "${BOARD_ROOT}" ]]; then + die_notrace "The board has not been set up: ${BOARD}" +fi + +# What cross-build are we targeting? +. "${BOARD_ROOT}/etc/make.conf.board_setup" diff --git a/scripts/build_image_util.sh b/scripts/build_image_util.sh new file mode 100755 index 000000000..9dd17e9d5 --- /dev/null +++ b/scripts/build_image_util.sh @@ -0,0 +1,326 @@ +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Shell library for functions and initialization private to +# build_image, and not specific to any particular kind of image. +# +# TODO(jrbarnette): There's nothing holding this code together in +# one file aside from its lack of anywhere else to go. Probably, +# this file should get broken up or otherwise reorganized. + +# Use canonical path since some tools (e.g. mount) do not like symlinks. +# Append build attempt to output directory. 
+IMAGE_SUBDIR="R${CHROME_BRANCH}" +if [ -z "${FLAGS_version}" ]; then + IMAGE_SUBDIR="${IMAGE_SUBDIR}-${CHROMEOS_VERSION_STRING}-a\ +${FLAGS_build_attempt}" +else + IMAGE_SUBDIR="${IMAGE_SUBDIR}-${FLAGS_version}" +fi + +if [ -n "${FLAGS_output_suffix}" ]; then + IMAGE_SUBDIR="${IMAGE_SUBDIR}-${FLAGS_output_suffix}" +fi + +BUILD_DIR="${FLAGS_build_root}/${BOARD}/${IMAGE_SUBDIR}" +OUTPUT_DIR="${FLAGS_output_root}/${BOARD}/${IMAGE_SUBDIR}" +OUTSIDE_OUTPUT_DIR="../build/images/${BOARD}/${IMAGE_SUBDIR}" +IMAGES_TO_BUILD= + +EMERGE_BOARD_CMD="$GCLIENT_ROOT/chromite/bin/parallel_emerge" +EMERGE_BOARD_CMD="$EMERGE_BOARD_CMD --board=$BOARD" + +export INSTALL_MASK="${DEFAULT_INSTALL_MASK}" + +if [[ $FLAGS_jobs -ne -1 ]]; then + EMERGE_JOBS="--jobs=$FLAGS_jobs" +fi + +# Populates list of IMAGES_TO_BUILD from args passed in. +# Arguments should be the shortnames of images we want to build. +get_images_to_build() { + local image_to_build + for image_to_build in $*; do + # Shflags leaves "'"s around ARGV. + case ${image_to_build} in + \'base\' ) + IMAGES_TO_BUILD="${IMAGES_TO_BUILD} ${CHROMEOS_BASE_IMAGE_NAME}" + ;; + \'dev\' ) + IMAGES_TO_BUILD="${IMAGES_TO_BUILD} ${CHROMEOS_DEVELOPER_IMAGE_NAME}" + ;; + \'test\' ) + IMAGES_TO_BUILD="${IMAGES_TO_BUILD} ${CHROMEOS_TEST_IMAGE_NAME}" + ;; + \'factory_install\' ) + IMAGES_TO_BUILD="${IMAGES_TO_BUILD} \ + ${CHROMEOS_FACTORY_INSTALL_SHIM_NAME}" + ;; + * ) + die "${image_to_build} is not an image specification." + ;; + esac + done + + # Set default if none specified. + if [ -z "${IMAGES_TO_BUILD}" ]; then + IMAGES_TO_BUILD=${CHROMEOS_DEVELOPER_IMAGE_NAME} + fi + + info "The following images will be built ${IMAGES_TO_BUILD}." +} + +# Look at flags to determine which image types we should build. +parse_build_image_args() { + get_images_to_build ${FLAGS_ARGV} + if should_build_image ${CHROMEOS_BASE_IMAGE_NAME} \ + ${CHROMEOS_DEVELOPER_IMAGE_NAME} ${CHROMEOS_TEST_IMAGE_NAME} && \ + should_build_image ${CHROMEOS_FACTORY_INSTALL_SHIM_NAME}; then + die_notrace \ + "Can't build ${CHROMEOS_FACTORY_INSTALL_SHIM_NAME} with any other" \ + "image." + fi + if should_build_image ${CHROMEOS_FACTORY_INSTALL_SHIM_NAME}; then + # For factory, force rootfs verification and bootcache off + FLAGS_enable_rootfs_verification=${FLAGS_FALSE} + FLAGS_enable_bootcache=${FLAGS_FALSE} + FLAGS_bootcache_use_board_default=${FLAGS_FALSE} + fi +} + +check_blacklist() { + info "Verifying that the base image does not contain a blacklisted package." + info "Generating list of packages for ${BASE_PACKAGE}." + local package_blacklist_file="${BUILD_LIBRARY_DIR}/chromeos_blacklist" + if [ ! -e "${package_blacklist_file}" ]; then + warn "Missing blacklist file." + return + fi + local blacklisted_packages=$(${SCRIPTS_DIR}/get_package_list \ + --board="${BOARD}" "${BASE_PACKAGE}" \ + | grep -x -f "${package_blacklist_file}") + if [ -n "${blacklisted_packages}" ]; then + die "Blacklisted packages found: ${blacklisted_packages}." + fi + info "No blacklisted packages found." +} + +make_salt() { + # It is not important that the salt be cryptographically strong; it just needs + # to be different for each release. The purpose of the salt is just to ensure + # that if someone collides a block in one release, they can't reuse it in + # future releases. + xxd -l 32 -p -c 32 /dev/urandom +} + +# Create a boot.desc file containing flags used to create this image. +# The format is a bit fragile -- make sure get_boot_desc parses it back. 
+create_boot_desc() {
+  local image_type=$1
+
+  local enable_rootfs_verification_flag=""
+  if [[ ${FLAGS_enable_rootfs_verification} -eq ${FLAGS_TRUE} ]]; then
+    enable_rootfs_verification_flag="--enable_rootfs_verification"
+  fi
+  local enable_bootcache_flag=""
+  if [[ ${FLAGS_enable_bootcache} -eq ${FLAGS_TRUE} ]]; then
+    enable_bootcache_flag=--enable_bootcache
+  fi
+
+  [ -z "${FLAGS_verity_salt}" ] && FLAGS_verity_salt=$(make_salt)
+  cat <<EOF > "${BUILD_DIR}/boot.desc"
+  --board=${BOARD}
+  --image_type=${image_type}
+  --arch="${ARCH}"
+  --keys_dir="${VBOOT_DEVKEYS_DIR}"
+  --boot_args="${FLAGS_boot_args}"
+  --nocleanup_dirs
+  --verity_algorithm=sha1
+  --enable_serial="${FLAGS_enable_serial}"
+  --loglevel="${FLAGS_loglevel}"
+  ${enable_rootfs_verification_flag}
+  ${enable_bootcache_flag}
+EOF
+}
+
+# Extract flags saved in boot.desc and return them via the boot_desc_flags
+# array.
+get_boot_desc() {
+  local boot_desc_file=$1
+  local line
+
+  if [[ ! -r ${boot_desc_file} ]]; then
+    warn "${boot_desc_file}: cannot be read"
+    return 1
+  fi
+
+  # Do not mark this local as it is the return value.
+  boot_desc_flags=()
+  while read line; do
+    if [[ -z ${line} ]]; then
+      continue
+    fi
+
+    # Hand extract the quotes to deal with random content in the value.
+    # e.g. When you pass --boot_args="foo=\"\$bar'" to build_image, we write it
+    # out in the file as --boot_args="foo="$bar'" which is a parse error if we
+    # tried to eval it directly.
+    line=$(echo "${line}" | sed -r \
+      -e 's:^\s+::;s:\s+$::' -e "s:^(--[^=]+=)([\"'])(.*)\2$:\1\3:")
+    boot_desc_flags+=( "${line}" )
+  done <"${boot_desc_file}"
+}
+
+# Utility function for moving the build directory to the output root.
+move_image() {
+  local source="$1"
+  local destination="$2"
+  # If the output_root isn't the same as the build_root, move the resulting
+  # image to the correct place in output_root.
+  if [[ "${source}" != "${destination}" ]]; then
+    info "Moving the image to: ${destination}."
+    mkdir -p "${destination}"
+    mv "${source}"/* "${destination}"
+    rmdir "${source}"
+  fi
+}
+
+delete_prompt() {
+  echo "An error occurred in your build so your latest output directory" \
+    "is invalid."
+
+  # Only prompt if both stdin and stdout are a tty. If either is not a tty,
+  # then the user may not be present, so we shouldn't bother prompting.
+  if [ -t 0 -a -t 1 -a "${USER}" != 'chrome-bot' ]; then
+    read -p "Would you like to delete the output directory (y/N)? " SURE
+    SURE="${SURE:0:1}" # Get just the first character.
+  else
+    SURE="y"
+    echo "Running in non-interactive mode so deleting output directory."
+  fi
+  if [ "${SURE}" == "y" ] ; then
+    sudo rm -rf "${BUILD_DIR}"
+    echo "Deleted ${BUILD_DIR}"
+  else
+    move_image "${BUILD_DIR}" "${OUTPUT_DIR}"
+    echo "Not deleting ${OUTPUT_DIR}."
+  fi
+}
+
+# Basic command to emerge binary packages into the target image.
+# Arguments to this command are passed as additional options/arguments
+# to the basic emerge command.
+emerge_to_image() {
+  sudo -E ${EMERGE_BOARD_CMD} --root-deps=rdeps --usepkgonly -v \
+    "$@" ${EMERGE_JOBS}
+}
+
+# Create the /etc/shadow file with all the right entries.
+SHARED_USER_NAME="chronos"
+SHARED_USER_PASSWD_FILE="/etc/shared_user_passwd.txt"
+setup_etc_shadow() {
+  local root=$1
+  local shadow="${root}/etc/shadow"
+  local passwd="${root}/etc/passwd"
+  local line
+  local cmds
+
+  # Remove the file completely so we know it is fully initialized
+  # with the correct permissions. Note: we're just making it writable
+  # here to simplify scripting; permission fixing happens at the end.
+ cmds=( + "rm -f '${shadow}'" + "install -m 666 /dev/null '${shadow}'" + ) + sudo_multi "${cmds[@]}" + + # Create shadow entries for all accounts in /etc/passwd that says + # they expect it. Otherwise, pam will not let people even log in + # via ssh keyauth. http://crbug.com/361864 + while read -r line; do + local acct=$(cut -d: -f1 <<<"${line}") + local pass=$(cut -d: -f2 <<<"${line}") + + # For the special shared user account, load the shared user password + # if one has been set. + if [[ ${acct} == "${SHARED_USER_NAME}" && + -e "${SHARED_USER_PASSWD_FILE}" ]]; then + pass=$(<"${SHARED_USER_PASSWD_FILE}") + fi + + case ${pass} in + # Login is disabled -> do nothing. + '!') ;; + # Password will be set later by tools. + '*') ;; + # Password is shadowed. + 'x') + echo "${acct}:*:::::::" >> "${shadow}" + ;; + # Password is set directly. + *) + echo "${acct}:${pass}:::::::" >> "${shadow}" + ;; + esac + done <"${passwd}" + + # Now make the settings sane. + cmds=( + "chown 0:0 '${shadow}'" + "chmod 600 '${shadow}'" + ) + sudo_multi "${cmds[@]}" +} + +# ldconfig cannot generate caches for non-native arches. +# Use qemu & the native ldconfig to work around that. +# http://crbug.com/378377 +run_ldconfig() { + local root_fs_dir=$1 + case ${ARCH} in + arm) + sudo qemu-arm "${root_fs_dir}"/sbin/ldconfig -r "${root_fs_dir}";; + mips) + sudo qemu-mipsel "${root_fs_dir}"/sbin/ldconfig -r "${root_fs_dir}";; + x86|amd64) + sudo ldconfig -r "${root_fs_dir}";; + *) + die "Unable to run ldconfig for ARCH ${ARCH}" + esac +} + +# Runs "depmod" to recalculate the kernel module dependencies. +# Args: +# board_root: root of the build output for the board +# root_fs_dir: target root file system mount point +run_depmod() { + local board_root="$1" + local root_fs_dir="$2" + + local root_fs_modules_path="${root_fs_dir}/lib/modules" + if [[ ! -d "${root_fs_modules_path}" ]]; then + return + fi + + local kernel_path + for kernel_path in "${root_fs_modules_path}/"*; do + local kernel_release="$(basename ${kernel_path})" + local kernel_out_dir="${board_root}/lib/modules/${kernel_release}/build" + local system_map="${kernel_out_dir}/System.map" + + if [[ -r "${system_map}" ]]; then + sudo depmod -ae -F "${system_map}" -b "${root_fs_dir}" "${kernel_release}" + fi + done +} + +# Newer udev versions do not pay attention to individual *.hwdb files +# but require up to date /etc/udev/hwdb.bin. Let's [re]generate it as +# part of build process. +# +# Since hwdb is a generic "key/value database based on modalias strings" +# the version of udevadm found on the host should suffice. +run_udevadm_hwdb() { + local root_fs_dir="$1" + sudo udevadm hwdb --update -r "${root_fs_dir}" +} diff --git a/scripts/cgpt b/scripts/cgpt new file mode 100755 index 000000000..13cc4e357 Binary files /dev/null and b/scripts/cgpt differ diff --git a/scripts/cgpt.py b/scripts/cgpt.py new file mode 100755 index 000000000..5f00937b3 --- /dev/null +++ b/scripts/cgpt.py @@ -0,0 +1,1558 @@ +#!/usr/bin/env python2 +# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Parse and operate based on disk layout files.""" + +from __future__ import print_function + +import argparse +import copy +import json +import math +import os +import re +import sys + + +class ConfigNotFound(Exception): + """Config Not Found""" + +class PartitionNotFound(Exception): + """Partition Not Found""" + +class InvalidLayout(Exception): + """Invalid Layout""" + +class InvalidAdjustment(Exception): + """Invalid Adjustment""" + +class InvalidSize(Exception): + """Invalid Size""" + +class ConflictingOptions(Exception): + """Conflicting Options""" + +class ConflictingPartitionOrder(Exception): + """The partition order in the parent and child layout don't match.""" + +class MismatchedRootfsFormat(Exception): + """Rootfs partitions in different formats""" + +class MismatchedRootfsBlocks(Exception): + """Rootfs partitions have different numbers of reserved erase blocks""" + +class MissingEraseBlockField(Exception): + """Partition has reserved erase blocks but not other fields needed""" + +class ExcessFailureProbability(Exception): + """Chances are high that the partition will have too many bad blocks""" + +class UnalignedPartition(Exception): + """Partition size does not divide erase block size""" + +class ExpandNandImpossible(Exception): + """Partition is raw NAND and marked with the incompatible expand feature""" + +class ExcessPartitionSize(Exception): + """Partitions sum to more than the size of the whole device""" + +COMMON_LAYOUT = 'common' +BASE_LAYOUT = 'base' +# Blocks of the partition entry array. +SIZE_OF_PARTITION_ENTRY_ARRAY = 32 +SIZE_OF_PMBR = 1 +SIZE_OF_GPT_HEADER = 1 + + +def ParseHumanNumber(operand): + """Parse a human friendly number + + This handles things like 4GiB and 4MB and such. See the usage string for + full details on all the formats supported. + + Args: + operand: The number to parse (may be an int or string) + + Returns: + An integer + """ + operand = str(operand) + negative = -1 if operand.startswith('-') else 1 + if negative == -1: + operand = operand[1:] + operand_digits = re.sub(r'\D', r'', operand) + + size_factor = block_factor = 1 + suffix = operand[len(operand_digits):].strip() + if suffix: + size_factors = {'B': 0, 'K': 1, 'M': 2, 'G': 3, 'T': 4,} + try: + size_factor = size_factors[suffix[0].upper()] + except KeyError: + raise InvalidAdjustment('Unknown size type %s' % suffix) + if size_factor == 0 and len(suffix) > 1: + raise InvalidAdjustment('Unknown size type %s' % suffix) + block_factors = {'': 1024, 'B': 1000, 'IB': 1024,} + try: + block_factor = block_factors[suffix[1:].upper()] + except KeyError: + raise InvalidAdjustment('Unknown size type %s' % suffix) + + return int(operand_digits) * pow(block_factor, size_factor) * negative + + +def ProduceHumanNumber(number): + """A simple reverse of ParseHumanNumber, converting a number to human form. + + Args: + number: A number (int) to be converted to human form. + + Returns: + A string, such as "1 KiB", that satisfies the condition + ParseHumanNumber(ProduceHumanNumber(i)) == i. 
+ """ + scales = [ + (2**40, 'Ti'), + (10**12, 'T'), + (2**30, 'Gi'), + (10**9, 'G'), + (2**20, 'Mi'), + (10**6, 'M'), + (2**10, 'Ki'), + (10**3, 'K') + ] + for denom, suffix in scales: + if (number % denom) == 0: + return '%d %sB' % (number // denom, suffix) + return str(number) + + +def ParseRelativeNumber(max_number, number): + """Return the number that is relative to |max_number| by |number| + + We support three forms: + 90% - |number| is a percentage of |max_number| + 100 - |number| is the answer already (and |max_number| is ignored) + -90 - |number| is subtracted from |max_number| + + Args: + max_number: The limit to use when |number| is negative or a percent + number: The (possibly relative) number to parse (may be an int or string) + """ + max_number = int(max_number) + number = str(number) + if number.endswith('%'): + percent = float(number[:-1]) / 100 + return int(max_number * percent) + else: + number = ParseHumanNumber(number) + if number < 0: + return max_number + number + else: + return number + + +def _ApplyLayoutOverrides(layout_to_override, layout): + """Applies |layout| overrides on to |layout_to_override|. + + First add missing partition from layout to layout_to_override. + Then, update partitions in layout_to_override with layout information. + """ + # First check that all the partitions defined in both layouts are defined in + # the same order in each layout. Otherwise, the order in which they end up + # in the merged layout doesn't match what the user sees in the child layout. + common_nums = set.intersection( + {part['num'] for part in layout_to_override if 'num' in part}, + {part['num'] for part in layout if 'num' in part}) + layout_to_override_order = [part['num'] for part in layout_to_override + if part.get('num') in common_nums] + layout_order = [part['num'] for part in layout + if part.get('num') in common_nums] + if layout_order != layout_to_override_order: + raise ConflictingPartitionOrder( + 'Layouts share partitions %s but they are in different order: ' + 'layout_to_override: %s, layout: %s' % ( + sorted(common_nums), + [part.get('num') for part in layout_to_override], + [part.get('num') for part in layout])) + + # Merge layouts with the partitions in the same order they are in both + # layouts. + part_index = 0 + for part_to_apply in layout: + num = part_to_apply.get('num') + + if part_index == len(layout_to_override): + # The part_to_apply is past the list of partitions to override, this + # means that is a new partition added at the end. + # Need of deepcopy, in case we change layout later. + layout_to_override.append(copy.deepcopy(part_to_apply)) + elif layout_to_override[part_index].get('num') is None and num is None: + # Allow modifying gaps after a partition. + # TODO(deymo): Drop support for "gap" partitions and use alignment + # instead. + layout_to_override[part_index].update(part_to_apply) + elif num in common_nums: + while layout_to_override[part_index].get('num') != num: + part_index += 1 + layout_to_override[part_index].update(part_to_apply) + else: + # Need of deepcopy, in case we change layout later. + layout_to_override.insert(part_index, copy.deepcopy(part_to_apply)) + part_index += 1 + + +def LoadJSONWithComments(filename): + """Loads a JSON file ignoring lines with comments. + + RFC 7159 doesn't allow comments on the file JSON format. This functions loads + a JSON file removing all the comment lines. A comment line is any line + starting with # and optionally indented with whitespaces. Note that inline + comments are not supported. 
+ + Args: + filename: The input filename. + + Returns: + The parsed JSON object. + """ + regex = re.compile(r'^\s*#.*') + with open(filename) as f: + source = ''.join(regex.sub('', line) for line in f) + return json.loads(source) + + +def _LoadStackedPartitionConfig(filename): + """Loads a partition table and its possible parent tables. + + This does very little validation. It's just enough to walk all of the parent + files and merges them with the current config. Overall validation is left to + the caller. + + Args: + filename: Filename to load into object. + + Returns: + Object containing disk layout configuration + """ + if not os.path.exists(filename): + raise ConfigNotFound('Partition config %s was not found!' % filename) + config = LoadJSONWithComments(filename) + + # Let's first apply our new configs onto base. + common_layout = config['layouts'].setdefault(COMMON_LAYOUT, []) + for layout_name, layout in config['layouts'].iteritems(): + # Don't apply on yourself. + if layout_name == COMMON_LAYOUT or layout_name == '_comment': + continue + + # Need to copy a list of dicts so make a deep copy. + working_layout = copy.deepcopy(common_layout) + _ApplyLayoutOverrides(working_layout, layout) + config['layouts'][layout_name] = working_layout + + dirname = os.path.dirname(filename) + # Now let's inherit the values from all our parents. + for parent in config.get('parent', '').split(): + parent_filename = os.path.join(dirname, parent) + if not os.path.exists(parent_filename): + # Try loading the parent file from the cgpt.py directory (global config). + parent_filename = os.path.join(os.path.join(os.path.dirname(__file__), + parent)) + parent_config = _LoadStackedPartitionConfig(parent_filename) + + # First if the parent is missing any fields the new config has, fill them + # in. + for key in config.keys(): + if key == 'parent': + continue + elif key == 'metadata': + # We handle this especially to allow for inner metadata fields to be + # added / modified. + parent_config.setdefault(key, {}) + parent_config[key].update(config[key]) + else: + parent_config.setdefault(key, config[key]) + + # The overrides work by taking the parent_config, apply the new config + # layout info, and return the resulting config which is stored in the parent + # config. + + # So there's an issue where an inheriting layout file may contain new + # layouts not previously defined in the parent layout. Since we are + # building these layout files based on the parent configs and overriding + # new values, we first add the new layouts not previously defined in the + # parent config using a copy of the base layout from that parent config. + parent_layouts = set(parent_config['layouts']) + config_layouts = set(config['layouts']) + new_layouts = config_layouts - parent_layouts + + # Actually add the copy. Use a copy such that each is unique. + parent_cmn_layout = parent_config['layouts'].setdefault(COMMON_LAYOUT, []) + for layout_name in new_layouts: + parent_config['layouts'][layout_name] = copy.deepcopy(parent_cmn_layout) + + # Iterate through each layout in the parent config and apply the new layout. 
+ common_layout = config['layouts'].setdefault(COMMON_LAYOUT, []) + for layout_name, parent_layout in parent_config['layouts'].iteritems(): + if layout_name == '_comment': + continue + + layout_override = config['layouts'].setdefault(layout_name, []) + if layout_name != COMMON_LAYOUT: + _ApplyLayoutOverrides(parent_layout, common_layout) + + _ApplyLayoutOverrides(parent_layout, layout_override) + + config = parent_config + + config.pop('parent', None) + return config + + +def LoadPartitionConfig(filename): + """Loads a partition tables configuration file into a Python object. + + Args: + filename: Filename to load into object + + Returns: + Object containing disk layout configuration + """ + + valid_keys = set(('_comment', 'metadata', 'layouts', 'parent')) + valid_layout_keys = set(( + '_comment', 'num', 'blocks', 'block_size', 'fs_blocks', 'fs_block_size', + 'uuid', 'label', 'format', 'fs_format', 'type', 'features', + 'size', 'fs_size', 'fs_options', 'erase_block_size', 'hybrid_mbr', + 'reserved_erase_blocks', 'max_bad_erase_blocks', 'external_gpt', + 'page_size', 'size_min', 'fs_size_min')) + valid_features = set(('expand',)) + + config = _LoadStackedPartitionConfig(filename) + try: + metadata = config['metadata'] + for key in ('block_size', 'fs_block_size'): + metadata[key] = ParseHumanNumber(metadata[key]) + + unknown_keys = set(config.keys()) - valid_keys + if unknown_keys: + raise InvalidLayout('Unknown items: %r' % unknown_keys) + + if len(config['layouts']) <= 0: + raise InvalidLayout('Missing "layouts" entries') + + if not BASE_LAYOUT in config['layouts'].keys(): + raise InvalidLayout('Missing "base" config in "layouts"') + + for layout_name, layout in config['layouts'].iteritems(): + if layout_name == '_comment': + continue + + for part in layout: + unknown_keys = set(part.keys()) - valid_layout_keys + if unknown_keys: + raise InvalidLayout('Unknown items in layout %s: %r' % + (layout_name, unknown_keys)) + + if part.get('num') == 'metadata' and 'type' not in part: + part['type'] = 'blank' + + if part['type'] != 'blank': + for s in ('num', 'label'): + if not s in part: + raise InvalidLayout('Layout "%s" missing "%s"' % (layout_name, s)) + + if 'size' in part: + if 'blocks' in part: + raise ConflictingOptions( + '%s: Conflicting settings are used. ' + 'Found section sets both \'blocks\' and \'size\'.' 
% + part['label']) + part['bytes'] = ParseHumanNumber(part['size']) + if 'size_min' in part: + size_min = ParseHumanNumber(part['size_min']) + if part['bytes'] < size_min: + part['bytes'] = size_min + part['blocks'] = part['bytes'] / metadata['block_size'] + + if part['bytes'] % metadata['block_size'] != 0: + raise InvalidSize( + 'Size: "%s" (%s bytes) is not an even number of block_size: %s' + % (part['size'], part['bytes'], metadata['block_size'])) + + if 'fs_size' in part: + part['fs_bytes'] = ParseHumanNumber(part['fs_size']) + if 'fs_size_min' in part: + fs_size_min = ParseHumanNumber(part['fs_size_min']) + if part['fs_bytes'] < fs_size_min: + part['fs_bytes'] = fs_size_min + if part['fs_bytes'] <= 0: + raise InvalidSize( + 'File system size "%s" must be positive' % + part['fs_size']) + if part['fs_bytes'] > part['bytes']: + raise InvalidSize( + 'Filesystem may not be larger than partition: %s %s: %d > %d' % + (layout_name, part['label'], part['fs_bytes'], part['bytes'])) + if part['fs_bytes'] % metadata['fs_block_size'] != 0: + raise InvalidSize( + 'File system size: "%s" (%s bytes) is not an even number of ' + 'fs blocks: %s' % + (part['fs_size'], part['fs_bytes'], metadata['fs_block_size'])) + if part.get('format') == 'ubi': + part_meta = GetMetadataPartition(layout) + page_size = ParseHumanNumber(part_meta['page_size']) + eb_size = ParseHumanNumber(part_meta['erase_block_size']) + ubi_eb_size = eb_size - 2 * page_size + if (part['fs_bytes'] % ubi_eb_size) != 0: + # Trim fs_bytes to multiple of UBI eraseblock size. + fs_bytes = part['fs_bytes'] - (part['fs_bytes'] % ubi_eb_size) + raise InvalidSize( + 'File system size: "%s" (%d bytes) is not a multiple of UBI ' + 'erase block size (%d). Please set "fs_size" to "%s" in the ' + '"common" layout instead.' % + (part['fs_size'], part['fs_bytes'], ubi_eb_size, + ProduceHumanNumber(fs_bytes))) + + if 'blocks' in part: + part['blocks'] = ParseHumanNumber(part['blocks']) + part['bytes'] = part['blocks'] * metadata['block_size'] + + if 'fs_blocks' in part: + max_fs_blocks = part['bytes'] / metadata['fs_block_size'] + part['fs_blocks'] = ParseRelativeNumber(max_fs_blocks, + part['fs_blocks']) + part['fs_bytes'] = part['fs_blocks'] * metadata['fs_block_size'] + + if part['fs_bytes'] > part['bytes']: + raise InvalidLayout( + 'Filesystem may not be larger than partition: %s %s: %d > %d' % + (layout_name, part['label'], part['fs_bytes'], part['bytes'])) + if 'erase_block_size' in part: + part['erase_block_size'] = ParseHumanNumber(part['erase_block_size']) + if 'page_size' in part: + part['page_size'] = ParseHumanNumber(part['page_size']) + + part.setdefault('features', []) + unknown_features = set(part['features']) - valid_features + if unknown_features: + raise InvalidLayout('%s: Unknown features: %s' % + (part['label'], unknown_features)) + except KeyError as e: + raise InvalidLayout('Layout is missing required entries: %s' % e) + + return config + + +def _GetPrimaryEntryArrayLBA(config): + """Return the start LBA of the primary partition entry array. + + Normally this comes after the primary GPT header but can be adjusted by + setting the "primary_entry_array_lba" key under "metadata" in the config. + + Args: + config: The config dictionary. + + Returns: + The position of the primary partition entry array. 
+ """ + + pmbr_and_header_size = SIZE_OF_PMBR + SIZE_OF_GPT_HEADER + entry_array = config['metadata'].get('primary_entry_array_lba', + pmbr_and_header_size) + if entry_array < pmbr_and_header_size: + raise InvalidLayout('Primary entry array (%d) must be at least %d.' % + entry_array, pmbr_and_header_size) + return entry_array + + +def _HasBadEraseBlocks(partitions): + return 'max_bad_erase_blocks' in GetMetadataPartition(partitions) + + +def _HasExternalGpt(partitions): + return GetMetadataPartition(partitions).get('external_gpt', False) + + +def _GetStartSector(config, partitions): + """Return the first usable location (LBA) for partitions. + + This value is the first LBA after the PMBR, the primary GPT header, and + partition entry array. + + We round it up to 64 to maintain the same layout as before in the normal (no + padding between the primary GPT header and its partition entry array) case. + + Args: + config: The config dictionary. + partitions: List of partitions to process + + Returns: + A suitable LBA for partitions, at least 64. + """ + + if _HasExternalGpt(partitions): + # If the GPT is external, then the offset of the partitions' actual data + # will be 0, and we don't need to make space at the beginning for the GPT. + return 0 + else: + entry_array = _GetPrimaryEntryArrayLBA(config) + start_sector = max(entry_array + SIZE_OF_PARTITION_ENTRY_ARRAY, 64) + return start_sector + + +def GetTableTotals(config, partitions): + """Calculates total sizes/counts for a partition table. + + Args: + config: Partition configuration file object + partitions: List of partitions to process + + Returns: + Dict containing totals data + """ + + start_sector = _GetStartSector(config, partitions) + ret = { + 'expand_count': 0, + 'expand_min': 0, + 'block_count': start_sector * config['metadata']['block_size'] + } + + # Total up the size of all non-expanding partitions to get the minimum + # required disk size. + for partition in partitions: + if partition.get('num') == 'metadata': + continue + if 'expand' in partition['features']: + ret['expand_count'] += 1 + ret['expand_min'] += partition['blocks'] + else: + ret['block_count'] += partition['blocks'] + + # At present, only one expanding partition is permitted. + # Whilst it'd be possible to have two, we don't need this yet + # and it complicates things, so it's been left out for now. + if ret['expand_count'] > 1: + raise InvalidLayout('1 expand partition allowed, %d requested' + % ret['expand_count']) + + ret['min_disk_size'] = ret['block_count'] + ret['expand_min'] + + return ret + + +def GetPartitionTable(options, config, image_type): + """Generates requested image_type layout from a layout configuration. + + This loads the base table and then overlays the requested layout over + the base layout. + + Args: + options: Flags passed to the script + config: Partition configuration file object + image_type: Type of image eg base/test/dev/factory_install + + Returns: + Object representing a selected partition table + """ + + # We make a deep copy so that changes to the dictionaries in this list do not + # persist across calls. + partitions = copy.deepcopy(config['layouts'][image_type]) + metadata = config['metadata'] + + # Convert fs_options to a string. 
+
+
+def GetPartitionTable(options, config, image_type):
+  """Generates requested image_type layout from a layout configuration.
+
+  This loads the base table and then overlays the requested layout over
+  the base layout.
+
+  Args:
+    options: Flags passed to the script
+    config: Partition configuration file object
+    image_type: Type of image eg base/test/dev/factory_install
+
+  Returns:
+    Object representing a selected partition table
+  """
+
+  # We make a deep copy so that changes to the dictionaries in this list
+  # do not persist across calls.
+  partitions = copy.deepcopy(config['layouts'][image_type])
+  metadata = config['metadata']
+
+  # Convert fs_options to a string.
+  for partition in partitions:
+    fs_options = partition.get('fs_options', '')
+    if isinstance(fs_options, dict):
+      fs_format = partition.get('fs_format')
+      fs_options = fs_options.get(fs_format, '')
+    elif not isinstance(fs_options, basestring):
+      raise InvalidLayout('Partition number %s: fs_options must be a '
+                          'string or dict, not %s' %
+                          (partition.get('num'), type(fs_options)))
+    if '"' in fs_options or "'" in fs_options:
+      raise InvalidLayout('Partition number %s: fs_options cannot have '
+                          'quotes' % partition.get('num'))
+    partition['fs_options'] = fs_options
+
+  for adjustment_str in options.adjust_part.split():
+    adjustment = adjustment_str.split(':')
+    if len(adjustment) < 2:
+      raise InvalidAdjustment('Adjustment "%s" is incomplete' %
+                              adjustment_str)
+
+    label = adjustment[0]
+    operator = adjustment[1][0]
+    operand = adjustment[1][1:]
+    ApplyPartitionAdjustment(partitions, metadata, label, operator, operand)
+
+  return partitions
+
+
+def ApplyPartitionAdjustment(partitions, metadata, label, operator, operand):
+  """Applies an adjustment to a partition specified by label.
+
+  Args:
+    partitions: Partition table to modify
+    metadata: Partition table metadata
+    label: The label of the partition to adjust
+    operator: Type of adjustment (+/-/=)
+    operand: How much to adjust by
+  """
+
+  partition = GetPartitionByLabel(partitions, label)
+
+  operand_bytes = ParseHumanNumber(operand)
+  if operand_bytes % metadata['block_size'] == 0:
+    operand_blocks = operand_bytes / metadata['block_size']
+  else:
+    raise InvalidAdjustment('Adjustment size %s not divisible by block '
+                            'size %s' %
+                            (operand_bytes, metadata['block_size']))
+
+  if operator == '+':
+    partition['blocks'] += operand_blocks
+    partition['bytes'] += operand_bytes
+  elif operator == '-':
+    partition['blocks'] -= operand_blocks
+    partition['bytes'] -= operand_bytes
+  elif operator == '=':
+    partition['blocks'] = operand_blocks
+    partition['bytes'] = operand_bytes
+  else:
+    raise ValueError('unknown operator %s' % operator)
+
+  if partition['type'] == 'rootfs':
+    # If we're adjusting a rootfs partition, we assume the full partition
+    # size specified is being used for the filesystem, minus the space
+    # reserved for the hashpad.
+    partition['fs_bytes'] = partition['bytes']
+    partition['fs_blocks'] = (partition['fs_bytes'] /
+                              metadata['fs_block_size'])
+    partition['blocks'] = int(partition['blocks'] * 1.15)
+    partition['bytes'] = partition['blocks'] * metadata['block_size']
+
+
+def GetPartitionTableFromConfig(options, layout_filename, image_type):
+  """Loads a partition table and returns a given partition table type.
+
+  Args:
+    options: Flags passed to the script
+    layout_filename: The filename to load tables from
+    image_type: The type of partition table to return
+  """
+
+  config = LoadPartitionConfig(layout_filename)
+  partitions = GetPartitionTable(options, config, image_type)
+
+  return partitions
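+
+
+# The --adjust_part flag handled above takes 'LABEL:<op><size>' words,
+# for example (hypothetical labels and sizes):
+#
+#   --adjust_part='STATE:+2G'    # grow STATE by 2 GiB
+#   --adjust_part='STATE:-512M'  # shrink STATE by 512 MiB
+#   --adjust_part='STATE:=4G'    # set STATE to exactly 4 GiB
+#
+# The operand must be a multiple of the layout's block_size; a rootfs
+# partition is additionally re-padded by 15% afterwards to leave room
+# for the hash data, per ApplyPartitionAdjustment above.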
+
+
+def GetScriptShell():
+  """Loads and returns the skeleton script for our output script.
+
+  Returns:
+    A string containing the skeleton script
+  """
+
+  script_shell_path = os.path.join(os.path.dirname(__file__),
+                                   'cgpt_shell.sh')
+  with open(script_shell_path, 'r') as f:
+    script_shell = f.read()
+
+  # Before we return, insert the path to this tool so somebody reading
+  # the script later can tell where it was generated.
+  script_shell = script_shell.replace('@SCRIPT_GENERATOR@',
+                                      script_shell_path)
+
+  return script_shell
+
+
+def GetFullPartitionSize(partition, metadata):
+  """Get the size of the partition including metadata/reserved space in bytes.
+
+  The partition only has to be bigger for raw NAND devices. Formula:
+  - Add UBI per-block metadata (2 pages) if partition is UBI
+  - Round up to erase block size
+  - Add UBI per-partition metadata (4 blocks) if partition is UBI
+  - Add reserved erase blocks
+  """
+
+  erase_block_size = metadata.get('erase_block_size', 0)
+  size = partition['bytes']
+
+  if erase_block_size == 0:
+    return size
+
+  # See "Flash space overhead" in
+  # http://www.linux-mtd.infradead.org/doc/ubi.html
+  # for overhead calculations.
+  is_ubi = partition.get('format') == 'ubi'
+  reserved_erase_blocks = partition.get('reserved_erase_blocks', 0)
+  page_size = metadata.get('page_size', 0)
+
+  if is_ubi:
+    ubi_block_size = erase_block_size - 2 * page_size
+    erase_blocks = (size + ubi_block_size - 1) // ubi_block_size
+    size += erase_blocks * 2 * page_size
+
+  erase_blocks = (size + erase_block_size - 1) // erase_block_size
+  size = erase_blocks * erase_block_size
+
+  if is_ubi:
+    size += erase_block_size * 4
+
+  size += reserved_erase_blocks * erase_block_size
+  return size
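+
+
+# Worked example of the UBI overhead math above, for a hypothetical NAND
+# geometry (256 KiB erase blocks, 4 KiB pages, a 1 MiB UBI partition,
+# no reserved erase blocks):
+#
+#   ubi_block_size = 262144 - 2 * 4096        = 253952
+#   erase_blocks   = ceildiv(1048576, 253952) = 5
+#   size           = 1048576 + 5 * 2 * 4096   = 1089536
+#   rounded up to erase blocks: 5 * 262144    = 1310720
+#   plus 4 UBI bookkeeping blocks: + 1048576  = 2359296
+#
+# so a nominal 1 MiB UBI partition consumes 2.25 MiB of raw flash.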
+
+
+def WriteLayoutFunction(options, sfile, func, image_type, config):
+  """Writes a shell script function to write out a given partition table.
+
+  Args:
+    options: Flags passed to the script
+    sfile: File handle we're writing to
+    func: Name for the layout function:
+      for a removable storage device: 'partition',
+      for the fixed storage device: 'base'
+    image_type: Type of image eg base/test/dev/factory_install
+    config: Partition configuration file object
+  """
+
+  partitions = GetPartitionTable(options, config, image_type)
+  metadata = GetMetadataPartition(partitions)
+  partition_totals = GetTableTotals(config, partitions)
+
+  lines = [
+      'write_%s_table() {' % func,
+  ]
+
+  if _HasExternalGpt(partitions):
+    # Read the GPT from the device to get its size, then wipe it out and
+    # operate on the GPT in tmpfs. We don't rely on cgpt's ability to
+    # deal directly with the GPT on SPI NOR flash because rewriting the
+    # table so many times would take a long time (>30min). Also, wiping
+    # out the previous GPT with create_image won't work for NAND and
+    # there's no equivalent via cgpt.
+    lines += [
+        'gptfile=$(mktemp)',
+        'flashrom -r -iRW_GPT:${gptfile}',
+        'gptsize=$(stat ${gptfile} --format %s)',
+        'dd if=/dev/zero of=${gptfile} bs=${gptsize} count=1',
+        'target="-D %d ${gptfile}"' % metadata['bytes'],
+    ]
+  else:
+    lines += [
+        'local target="$1"',
+        'create_image "${target}" %d %s' % (
+            partition_totals['min_disk_size'],
+            config['metadata']['block_size']),
+    ]
+
+  # ${target} is referenced unquoted because it may expand into multiple
+  # arguments in the case of NAND.
+  lines += [
+      'local curr=%d' % _GetStartSector(config, partitions),
+      '# Create the GPT headers and tables. Pad the primary ones.',
+      './cgpt create -p %d ${target}' % (
+          _GetPrimaryEntryArrayLBA(config) -
+          (SIZE_OF_PMBR + SIZE_OF_GPT_HEADER)),
+  ]
+
+  # Pass 1: Set up the expanding partition size.
+  for partition in partitions:
+    if partition.get('num') == 'metadata':
+      continue
+    partition['var'] = (GetFullPartitionSize(partition, metadata) /
+                        config['metadata']['block_size'])
+
+    if (partition.get('type') != 'blank' and partition['num'] == 1 and
+        'expand' in partition['features']):
+      lines += [
+          'local stateful_size=%s' % partition['blocks'],
+          'if [ -b "${target}" ]; then',
+          '  stateful_size=$(( $(numsectors "${target}") - %d))' % (
+              partition_totals['block_count']),
+          'fi',
+          ': $(( stateful_size -= (stateful_size %% %d) ))' % (
+              config['metadata']['fs_block_size']),
+      ]
+      partition['var'] = '${stateful_size}'
+
+  # Pass 2: Write out all the cgpt add commands.
+  for partition in partitions:
+    if partition.get('num') == 'metadata':
+      continue
+    if partition['type'] != 'blank':
+      lines += [
+          './cgpt add -i %d -b ${curr} -s %s -t %s -l "%s" ${target}' % (
+              partition['num'], str(partition['var']), partition['type'],
+              partition['label']),
+      ]
+
+    # Increment the curr counter ready for the next partition.
+    if partition['var'] != 0 and partition.get('num') != 'metadata':
+      lines += [
+          ': $(( curr += %s ))' % partition['var'],
+      ]
+
+  # Set default priorities and the retry counter on kernel partitions.
+  tries = 15
+  prio = 15
+  # The order of partition numbers in this loop matters.
+  # Make sure partition #2 is the first one, since it will be marked as
+  # the default bootable partition.
+  for partition in GetPartitionsByType(partitions, 'kernel'):
+    lines += [
+        './cgpt add -i %s -S 0 -T %i -P %i ${target}' %
+        (partition['num'], tries, prio)
+    ]
+    prio = 0
+    # When not writing the 'base' function, make sure the other kernel
+    # partitions are marked as non-bootable (retry count == 0), since
+    # the USB layout doesn't have any valid data in slots B & C. But
+    # with the 'base' function, called by the chromeos-install script,
+    # the KERNEL-A partition is replicated into both slots A & B, so we
+    # should leave both bootable for error recovery in this case.
+    if func != 'base':
+      tries = 0
+
+  efi_partitions = GetPartitionsByType(partitions, 'efi')
+  if efi_partitions:
+    lines += [
+        './cgpt boot -p -b $2 -i %d ${target}' % efi_partitions[0]['num'],
+        './cgpt add -i %s -B 1 ${target}' % efi_partitions[0]['num'],
+    ]
+  else:
+    # Provide a PMBR all the time for boot loaders (like u-boot) that
+    # expect one to always be there.
+    lines += [
+        './cgpt boot -p -b $2 ${target}',
+    ]
+
+  if metadata.get('hybrid_mbr'):
+    lines += ['install_hybrid_mbr ${target}']
+  lines += ['./cgpt show ${target}']
+
+  if _HasExternalGpt(partitions):
+    lines += ['flashrom -w -iRW_GPT:${gptfile} --fast-verify']
+
+  sfile.write('%s\n}\n' % '\n '.join(lines))
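+
+
+# For a simple fixed-disk layout, the emitted function looks roughly
+# like the following (illustrative output only; the exact sizes, types
+# and labels depend on the layout file):
+#
+#   write_base_table() {
+#    local target="$1"
+#    create_image "${target}" 2147483648 512
+#    local curr=64
+#    # Create the GPT headers and tables. Pad the primary ones.
+#    ./cgpt create -p 0 ${target}
+#    ./cgpt add -i 1 -b ${curr} -s 4194304 -t data -l "STATE" ${target}
+#    : $(( curr += 4194304 ))
+#    ./cgpt boot -p -b $2 ${target}
+#    ./cgpt show ${target}
+#   }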
+
+
+def WritePartitionSizesFunction(options, sfile, func, image_type, config):
+  """Writes out the partition size variables that a caller can extract.
+
+  Args:
+    options: Flags passed to the script
+    sfile: File handle we're writing to
+    func: Name for the layout function:
+      for a removable storage device: 'partition',
+      for the fixed storage device: 'base'
+    image_type: Type of image eg base/test/dev/factory_install
+    config: Partition configuration file object
+  """
+
+  func_name = 'load_%s_vars' % func
+  lines = [
+      '%s() {' % func_name,
+      'DEFAULT_ROOTDEV="%s"' % config['metadata'].get('rootdev_%s' % func,
+                                                      ''),
+  ]
+
+  partitions = GetPartitionTable(options, config, image_type)
+  for partition in partitions:
+    if partition.get('num') == 'metadata':
+      continue
+    for key in ('label', 'num'):
+      if key in partition:
+        shell_label = str(partition[key]).replace('-', '_').upper()
+        part_bytes = partition['bytes']
+        reserved_ebs = partition.get('reserved_erase_blocks', 0)
+        fs_bytes = partition.get('fs_bytes', part_bytes)
+        part_format = partition.get('format', '')
+        fs_format = partition.get('fs_format', '')
+        fs_options = partition.get('fs_options', '')
+        partition_num = partition.get('num', '')
+        lines += [
+            'PARTITION_SIZE_%s=%s' % (shell_label, part_bytes),
+            'RESERVED_EBS_%s=%s' % (shell_label, reserved_ebs),
+            'DATA_SIZE_%s=%s' % (shell_label, fs_bytes),
+            'FORMAT_%s=%s' % (shell_label, part_format),
+            'FS_FORMAT_%s=%s' % (shell_label, fs_format),
+            'FS_OPTIONS_%s="%s"' % (shell_label, fs_options),
+            'PARTITION_NUM_%s="%s"' % (shell_label, partition_num),
+        ]
+
+  sfile.write('%s\n}\n' % '\n '.join(lines))
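+
+
+# A sketch of what the generated loader looks like for a single STATE
+# partition (hypothetical values; the same variables are also emitted a
+# second time keyed by partition number). Callers source the output
+# script and read these shell variables directly:
+#
+#   load_base_vars() {
+#    DEFAULT_ROOTDEV=""
+#    PARTITION_SIZE_STATE=2147483648
+#    RESERVED_EBS_STATE=0
+#    DATA_SIZE_STATE=2147483648
+#    FORMAT_STATE=
+#    FS_FORMAT_STATE=ext4
+#    FS_OPTIONS_STATE=""
+#    PARTITION_NUM_STATE="1"
+#   }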
+
+
+def GetPartitionByNumber(partitions, num):
+  """Given a partition table and number, returns the partition object.
+
+  Args:
+    partitions: List of partitions to search in
+    num: Number of partition to find
+
+  Returns:
+    An object for the selected partition
+  """
+  for partition in partitions:
+    if partition.get('num') == int(num):
+      return partition
+
+  raise PartitionNotFound('Partition %s not found' % num)
+
+
+def GetPartitionsByType(partitions, typename):
+  """Given a partition table and type, returns the partitions of that type.
+
+  Partitions are sorted in num order.
+
+  Args:
+    partitions: List of partitions to search in
+    typename: The type of partitions to select
+
+  Returns:
+    A list of partitions of the type
+  """
+  out = []
+  for partition in partitions:
+    if partition.get('type') == typename:
+      out.append(partition)
+  return sorted(out, key=lambda partition: partition.get('num'))
+
+
+def GetMetadataPartition(partitions):
+  """Given a partition table, returns the metadata partition object.
+
+  Args:
+    partitions: List of partitions to search in
+
+  Returns:
+    An object for the metadata partition, or an empty dict if there is
+    none
+  """
+  for partition in partitions:
+    if partition.get('num') == 'metadata':
+      return partition
+
+  return {}
+
+
+def GetPartitionByLabel(partitions, label):
+  """Given a partition table and label, returns the partition object.
+
+  Args:
+    partitions: List of partitions to search in
+    label: Label of partition to find
+
+  Returns:
+    An object for the selected partition
+  """
+  for partition in partitions:
+    if 'label' not in partition:
+      continue
+    if partition['label'] == label:
+      return partition
+
+  raise PartitionNotFound('Partition "%s" not found' % label)
+
+
+def WritePartitionScript(options, image_type, layout_filename, sfilename):
+  """Writes a shell script with functions for the base and requested layouts.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+    sfilename: Filename to write the finished script to
+  """
+
+  config = LoadPartitionConfig(layout_filename)
+
+  with open(sfilename, 'w') as f:
+    script_shell = GetScriptShell()
+    f.write(script_shell)
+
+    for func, layout in (('base', BASE_LAYOUT), ('partition', image_type)):
+      WriteLayoutFunction(options, f, func, layout, config)
+      WritePartitionSizesFunction(options, f, func, layout, config)
+
+    # TODO: Backwards compat. Should be killed off once we update
+    # cros_generate_update_payload to use the new code.
+    partitions = GetPartitionTable(options, config, BASE_LAYOUT)
+    partition = GetPartitionByLabel(partitions, 'ROOT-A')
+    f.write('ROOTFS_PARTITION_SIZE=%s\n' % (partition['bytes'],))
+
+
+def GetBlockSize(_options, layout_filename):
+  """Returns the partition table block size.
+
+  Args:
+    options: Flags passed to the script
+    layout_filename: Path to partition configuration file
+
+  Returns:
+    Block size of all partitions in the layout
+  """
+
+  config = LoadPartitionConfig(layout_filename)
+  return config['metadata']['block_size']
+
+
+def GetFilesystemBlockSize(_options, layout_filename):
+  """Returns the filesystem block size.
+
+  This is used for all partitions in the table that have filesystems.
+
+  Args:
+    options: Flags passed to the script
+    layout_filename: Path to partition configuration file
+
+  Returns:
+    Block size of all filesystems in the layout
+  """
+
+  config = LoadPartitionConfig(layout_filename)
+  return config['metadata']['fs_block_size']
+
+
+def GetImageTypes(_options, layout_filename):
+  """Returns a list of all the image types in the layout.
+
+  Args:
+    options: Flags passed to the script
+    layout_filename: Path to partition configuration file
+
+  Returns:
+    List of all image types
+  """
+
+  config = LoadPartitionConfig(layout_filename)
+  return ' '.join(config['layouts'].keys())
+
+
+def GetType(options, image_type, layout_filename, num):
+  """Returns the type of a given partition for a given layout.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+    num: Number of the partition you want to read from
+
+  Returns:
+    Type of the specified partition.
+  """
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  partition = GetPartitionByNumber(partitions, num)
+  return partition.get('type')
+
+
+def GetPartitions(options, image_type, layout_filename):
+  """Returns the partition numbers for the image_type.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+
+  Returns:
+    A space delimited string of partition numbers.
+  """
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  return ' '.join(str(p['num']) for p in partitions
+                  if 'num' in p and p['num'] != 'metadata')
+ """ + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + return partition.get('uuid', 'random') + + +def GetPartitionSize(options, image_type, layout_filename, num): + """Returns the partition size of a given partition for a given layout type. + + Args: + options: Flags passed to the script + image_type: Type of image eg base/test/dev/factory_install + layout_filename: Path to partition configuration file + num: Number of the partition you want to read from + + Returns: + Size of selected partition in bytes + """ + + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + + return partition['bytes'] + + +def GetFilesystemFormat(options, image_type, layout_filename, num): + """Returns the filesystem format of a given partition for a given layout type. + + Args: + options: Flags passed to the script + image_type: Type of image eg base/test/dev/factory_install + layout_filename: Path to partition configuration file + num: Number of the partition you want to read from + + Returns: + Format of the selected partition's filesystem + """ + + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + + return partition.get('fs_format') + + +def GetFormat(options, image_type, layout_filename, num): + """Returns the format of a given partition for a given layout type. + + Args: + options: Flags passed to the script + image_type: Type of image eg base/test/dev/factory_install + layout_filename: Path to partition configuration file + num: Number of the partition you want to read from + + Returns: + Format of the selected partition's filesystem + """ + + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + + return partition.get('format') + + +def GetFilesystemOptions(options, image_type, layout_filename, num): + """Returns the filesystem options of a given partition and layout type. + + Args: + options: Flags passed to the script + image_type: Type of image eg base/test/dev/factory_install + layout_filename: Path to partition configuration file + num: Number of the partition you want to read from + + Returns: + The selected partition's filesystem options + """ + + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + + return partition.get('fs_options') + + +def GetFilesystemSize(options, image_type, layout_filename, num): + """Returns the filesystem size of a given partition for a given layout type. + + If no filesystem size is specified, returns the partition size. + + Args: + options: Flags passed to the script + image_type: Type of image eg base/test/dev/factory_install + layout_filename: Path to partition configuration file + num: Number of the partition you want to read from + + Returns: + Size of selected partition filesystem in bytes + """ + + partitions = GetPartitionTableFromConfig(options, layout_filename, image_type) + partition = GetPartitionByNumber(partitions, num) + + if 'fs_bytes' in partition: + return partition['fs_bytes'] + else: + return partition['bytes'] + + +def GetLabel(options, image_type, layout_filename, num): + """Returns the label for a given partition. 
+
+
+def GetLabel(options, image_type, layout_filename, num):
+  """Returns the label for a given partition.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+    num: Number of the partition you want to read from
+
+  Returns:
+    Label of the selected partition, or 'UNTITLED' if none specified
+  """
+
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  partition = GetPartitionByNumber(partitions, num)
+
+  if 'label' in partition:
+    return partition['label']
+  else:
+    return 'UNTITLED'
+
+
+def GetNumber(options, image_type, layout_filename, label):
+  """Returns the partition number of a given label.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+    label: Label of the partition you want to look up
+
+  Returns:
+    The number of the partition corresponding to the label.
+  """
+
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  partition = GetPartitionByLabel(partitions, label)
+  return partition['num']
+
+
+def GetReservedEraseBlocks(options, image_type, layout_filename, num):
+  """Returns the number of erase blocks reserved in the partition.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+    num: Number of the partition you want to read from
+
+  Returns:
+    Number of reserved erase blocks
+  """
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  partition = GetPartitionByNumber(partitions, num)
+  if 'reserved_erase_blocks' in partition:
+    return partition['reserved_erase_blocks']
+  else:
+    return 0
+
+
+def DoDebugOutput(options, image_type, layout_filename):
+  """Prints out a human readable disk layout in on-disk order.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+  """
+  config = LoadPartitionConfig(layout_filename)
+  partitions = GetPartitionTable(options, config, image_type)
+
+  label_len = max([len(x['label']) for x in partitions if 'label' in x])
+  type_len = max([len(x['type']) for x in partitions if 'type' in x])
+
+  msg = 'num:%4s label:%-*s type:%-*s size:%-10s fs_size:%-10s features:%s'
+
+  # Print out the non-layout options first.
+  print('Config Data')
+  metadata_msg = 'field:%-14s value:%s'
+  for key in config.keys():
+    if key not in ('layouts', '_comment'):
+      print(metadata_msg % (key, config[key]))
+
+  print('\n%s Layout Data' % image_type.upper())
+  for partition in partitions:
+    if partition.get('num') == 'metadata':
+      continue
+
+    size = ProduceHumanNumber(partition['bytes'])
+    if 'fs_bytes' in partition:
+      fs_size = ProduceHumanNumber(partition['fs_bytes'])
+    else:
+      fs_size = 'auto'
+
+    print(msg % (
+        partition.get('num', 'auto'),
+        label_len,
+        partition.get('label', ''),
+        type_len,
+        partition.get('type', ''),
+        size,
+        fs_size,
+        partition.get('features', []),
+    ))
+ """ + partition_format = None + reserved_erase_blocks = -1 + for partition in partitions: + if partition.get('type') == 'rootfs': + new_format = partition.get('format', '') + new_reserved_erase_blocks = partition.get('reserved_erase_blocks', 0) + + if partition_format is None: + partition_format = new_format + reserved_erase_blocks = new_reserved_erase_blocks + + if new_format != partition_format: + raise MismatchedRootfsFormat( + 'mismatched rootfs formats: "%s" and "%s"' % + (partition_format, new_format)) + + if reserved_erase_blocks != new_reserved_erase_blocks: + raise MismatchedRootfsBlocks( + 'mismatched rootfs reserved erase block counts: %s and %s' % + (reserved_erase_blocks, new_reserved_erase_blocks)) + + +def Combinations(n, k): + """Calculate the binomial coefficient, i.e., "n choose k" + + This calculates the number of ways that k items can be chosen from + a set of size n. For example, if there are n blocks and k of them + are bad, then this returns the number of ways that the bad blocks + can be distributed over the device. + See http://en.wikipedia.org/wiki/Binomial_coefficient + + For convenience to the caller, this function allows impossible cases + as input and returns 0 for them. + """ + if k < 0 or n < k: + return 0 + return math.factorial(n) / (math.factorial(k) * math.factorial(n - k)) + + +def CheckReservedEraseBlocks(partitions): + """Checks that the reserved_erase_blocks in each partition is good. + + This function checks that a reasonable value was given for the reserved + erase block count. In particular, it checks that there's a less than + 1 in 100k probability that, if the manufacturer's maximum bad erase + block count is met, and assuming bad blocks are uniformly randomly + distributed, then more bad blocks will fall in this partition than are + reserved. Smaller partitions need a larger reserve percentage. + + We take the number of reserved blocks as a parameter in disk_layout.json + rather than just calculating the value so that it can be tweaked + explicitly along with others in squeezing the image onto flash. But + we check it so that users have an easy method for determining what's + acceptable--just try out a new value and do ./build_image. + """ + for partition in partitions: + if ('reserved_erase_blocks' in partition or + partition.get('format') in ('ubi', 'nand')): + if partition.get('bytes', 0) == 0: + continue + metadata = GetMetadataPartition(partitions) + if (not _HasBadEraseBlocks(partitions) + or 'reserved_erase_blocks' not in partition + or 'bytes' not in metadata + or 'erase_block_size' not in metadata + or 'page_size' not in metadata): + raise MissingEraseBlockField( + 'unable to check if partition %s will have too many bad blocks due ' + 'to missing metadata field' % partition['label']) + + reserved = partition['reserved_erase_blocks'] + erase_block_size = metadata['erase_block_size'] + device_erase_blocks = metadata['bytes'] / erase_block_size + device_bad_blocks = metadata['max_bad_erase_blocks'] + distributions = Combinations(device_erase_blocks, device_bad_blocks) + partition_erase_blocks = partition['bytes'] / erase_block_size + # The idea is to calculate the number of ways that there could be reserved + # or more bad blocks inside the partition, assuming that there are + # device_bad_blocks in the device in total (the worst case). To get the + # probability, we divide this count by the total number of ways that the + # bad blocks can be distributed on the whole device. 
+
+
+def CheckReservedEraseBlocks(partitions):
+  """Checks that the reserved_erase_blocks in each partition is good.
+
+  This function checks that a reasonable value was given for the
+  reserved erase block count. In particular, it checks that there's a
+  less than 1 in 100k probability that, if the manufacturer's maximum
+  bad erase block count is met, and assuming bad blocks are uniformly
+  randomly distributed, more bad blocks will fall in this partition
+  than are reserved. Smaller partitions need a larger reserve
+  percentage.
+
+  We take the number of reserved blocks as a parameter in
+  disk_layout.json rather than just calculating the value so that it
+  can be tweaked explicitly along with the others when squeezing the
+  image onto flash. But we check it so that users have an easy method
+  for determining what's acceptable: just try out a new value and run
+  ./build_image.
+  """
+  for partition in partitions:
+    if ('reserved_erase_blocks' in partition or
+        partition.get('format') in ('ubi', 'nand')):
+      if partition.get('bytes', 0) == 0:
+        continue
+      metadata = GetMetadataPartition(partitions)
+      if (not _HasBadEraseBlocks(partitions) or
+          'reserved_erase_blocks' not in partition or
+          'bytes' not in metadata or
+          'erase_block_size' not in metadata or
+          'page_size' not in metadata):
+        raise MissingEraseBlockField(
+            'unable to check if partition %s will have too many bad '
+            'blocks due to a missing metadata field' % partition['label'])
+
+      reserved = partition['reserved_erase_blocks']
+      erase_block_size = metadata['erase_block_size']
+      device_erase_blocks = metadata['bytes'] / erase_block_size
+      device_bad_blocks = metadata['max_bad_erase_blocks']
+      distributions = Combinations(device_erase_blocks, device_bad_blocks)
+      partition_erase_blocks = partition['bytes'] / erase_block_size
+      # The idea is to calculate the number of ways that there could be
+      # more than 'reserved' bad blocks inside the partition, assuming
+      # that there are device_bad_blocks in the device in total (the
+      # worst case). To get the probability, we divide this count by the
+      # total number of ways that the bad blocks can be distributed on
+      # the whole device. To find the first number, we sum, over
+      # increasing counts of bad blocks within the partition, the number
+      # of ways that those bad blocks can be inside the partition,
+      # multiplied by the number of ways that the remaining blocks can
+      # be distributed outside of the partition.
+      ways_for_failure = sum(
+          Combinations(partition_erase_blocks, partition_bad_blocks) *
+          Combinations(device_erase_blocks - partition_erase_blocks,
+                       device_bad_blocks - partition_bad_blocks)
+          for partition_bad_blocks
+          in range(reserved + 1, device_bad_blocks + 1))
+      probability = (1.0 * ways_for_failure) / distributions
+      if probability > 0.00001:
+        raise ExcessFailureProbability(
+            'excessive probability %f of too many bad blocks in '
+            'partition %s' % (probability, partition['label']))
+
+
+def CheckSimpleNandProperties(partitions):
+  """Checks that NAND partitions are erase-block-aligned and not expand."""
+  if not _HasBadEraseBlocks(partitions):
+    return
+  metadata = GetMetadataPartition(partitions)
+  for partition in partitions:
+    erase_block_size = metadata['erase_block_size']
+    if partition['bytes'] % erase_block_size != 0:
+      raise UnalignedPartition(
+          'partition size %s is not a multiple of erase block size %s' %
+          (partition['bytes'], erase_block_size))
+    if 'expand' in partition['features']:
+      raise ExpandNandImpossible(
+          'expand partitions may not be used with raw NAND')
+
+
+def CheckTotalSize(partitions):
+  """Checks that the sum of all partition sizes fits within the device."""
+  metadata = GetMetadataPartition(partitions)
+  if 'bytes' not in metadata:
+    return
+  capacity = metadata['bytes']
+  total = sum(GetFullPartitionSize(partition, metadata)
+              for partition in partitions
+              if partition.get('num') != 'metadata')
+  if total > capacity:
+    raise ExcessPartitionSize('capacity = %d, total = %d' %
+                              (capacity, total))
+
+
+def Validate(options, image_type, layout_filename):
+  """Validates a layout file, used before reading sizes to check for errors.
+
+  Args:
+    options: Flags passed to the script
+    image_type: Type of image eg base/test/dev/factory_install
+    layout_filename: Path to partition configuration file
+  """
+  partitions = GetPartitionTableFromConfig(options, layout_filename,
+                                           image_type)
+  CheckRootfsPartitionsMatch(partitions)
+  CheckTotalSize(partitions)
+  CheckSimpleNandProperties(partitions)
+  CheckReservedEraseBlocks(partitions)
+
+
+def main(argv):
+  action_map = {
+      'write': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<script_file>'],
+          'func': WritePartitionScript,
+      },
+      'readblocksize': {
+          'usage': ['<partition_table_file>'],
+          'func': GetBlockSize,
+      },
+      'readfsblocksize': {
+          'usage': ['<partition_table_file>'],
+          'func': GetFilesystemBlockSize,
+      },
+      'readpartsize': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetPartitionSize,
+      },
+      'readformat': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetFormat,
+      },
+      'readfsformat': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetFilesystemFormat,
+      },
+      'readfssize': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetFilesystemSize,
+      },
+      'readimagetypes': {
+          'usage': ['<partition_table_file>'],
+          'func': GetImageTypes,
+      },
+      'readfsoptions': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetFilesystemOptions,
+      },
+      'readlabel': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetLabel,
+      },
+      'readnumber': {
+          'usage': ['<image_type>', '<partition_table_file>', '<label>'],
+          'func': GetNumber,
+      },
+      'readreservederaseblocks': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetReservedEraseBlocks,
+      },
+      'readtype': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetType,
+      },
+      'readpartitionnums': {
+          'usage': ['<image_type>', '<partition_table_file>'],
+          'func': GetPartitions,
+      },
+      'readuuid': {
+          'usage': ['<image_type>', '<partition_table_file>',
+                    '<partition_num>'],
+          'func': GetUUID,
+      },
+      'debug': {
+          'usage': ['<image_type>', '<partition_table_file>'],
+          'func': DoDebugOutput,
+      },
+      'validate': {
+          'usage': ['<image_type>', '<partition_table_file>'],
+          'func': Validate,
+      },
+  }
+
+  usage = """%(prog)s <action> [options]
+
+For information on the JSON format, see:
+  http://dev.chromium.org/chromium-os/developer-guide/disk-layout-format
+
+The --adjust_part flag takes arguments like: