Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
137 changes: 101 additions & 36 deletions test/bin/pyutils/build_bootc_images.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,41 @@
NEXT_REPO = common.get_env_var('NEXT_REPO')
HOME_DIR = common.get_env_var("HOME")
PULL_SECRET = common.get_env_var('PULL_SECRET', f"{HOME_DIR}/.pull-secret.json")
BIB_IMAGE = "quay.io/centos-bootc/bootc-image-builder:latest"
FORCE_REBUILD = False


def cleanup_atexit(dry_run):
    """Best-effort cleanup executed on exit.

    Terminates any subprocesses spawned by this process and stops any
    bootc-image-builder containers that are still running.  The dry_run
    flag is forwarded to the shell helpers so nothing is executed during
    a dry run.
    """
    common.print_msg("Running atexit cleanup")
    # Terminating any running subprocesses
    for pid in common.find_subprocesses():
        common.print_msg(f"Terminating {pid} PID")
        common.terminate_process(pid)

    # Find running bootc image builder containers by their ancestor image
    podman_args = [
        "sudo", "podman", "ps",
        "--filter", f"ancestor={BIB_IMAGE}",
        "--format", "{{.ID}}"
    ]
    cids = common.run_command_in_shell(podman_args, dry_run)
    if cids:
        # Make sure the ids are normalized in a single line
        cids = re.sub(r'\s+', ' ', cids).strip()
        common.print_msg(f"Terminating '{cids}' container(s)")
        # Pass each container ID as a separate argument: a single
        # space-joined argument is only understood when the helper runs
        # through a shell, while separate arguments work either way
        common.run_command_in_shell(["sudo", "podman", "stop"] + cids.split(), dry_run)


def should_skip(file):
    """Return True when building 'file' should be skipped.

    A build is skipped only when the target artifact already exists and
    a rebuild is not being forced via the FORCE_REBUILD option.
    """
    # Nothing to skip when the artifact does not exist yet
    if not os.path.exists(file):
        return False
    # Forcing the rebuild if needed
    if FORCE_REBUILD:
        common.print_msg(f"Forcing rebuild of '{file}'")
        return False

    common.print_msg(f"The '{file}' already exists, skipping")
    return True


def find_latest_rpm(repo_path, version=""):
Expand Down Expand Up @@ -160,7 +189,7 @@ def process_containerfile(groupdir, containerfile, dry_run):

# Check if the target artifact exists
if should_skip(cf_targetimg):
common.record_junit(groupdir, cf_path, "containerfile", "SKIPPED")
common.record_junit(cf_path, "process-container", "SKIPPED")
return

# Create the output directories
Expand All @@ -178,6 +207,7 @@ def process_containerfile(groupdir, containerfile, dry_run):
os.path.join(IMAGEDIR, "rpm-repos")
]
common.run_command_in_shell(build_args, dry_run, logfile, logfile)
common.record_junit(cf_path, "build-container", "OK")

# Run the container export command
if os.path.exists(cf_outdir):
Expand All @@ -188,7 +218,9 @@ def process_containerfile(groupdir, containerfile, dry_run):
"-o", cf_outdir, cf_outname
]
common.run_command_in_shell(save_args, dry_run, logfile, logfile)
common.record_junit(cf_path, "save-container", "OK")
except Exception:
common.record_junit(cf_path, "process-container", "FAILED")
# Propagate the exception to the caller
raise
finally:
Expand All @@ -205,7 +237,7 @@ def process_image_bootc(groupdir, bootcfile, dry_run):

# Check if the target artifact exists
if should_skip(bf_targetiso):
common.record_junit(groupdir, bf_path, "image-bootc", "SKIPPED")
common.record_junit(bf_path, "process-bootc-image", "SKIPPED")
return

# Create the output directories
Expand All @@ -223,6 +255,7 @@ def process_image_bootc(groupdir, bootcfile, dry_run):
"--authfile", PULL_SECRET, bf_imgref
]
common.run_command_in_shell(pull_args, dry_run, logfile, logfile)
common.record_junit(bf_path, "pull-bootc-image", "OK")

# The podman command with security elevation and
# mount of output / container storage
Expand All @@ -236,13 +269,15 @@ def process_image_bootc(groupdir, bootcfile, dry_run):
]
# Add the bootc image builder command line using local images
build_args += [
"quay.io/centos-bootc/bootc-image-builder:latest",
BIB_IMAGE,
"--type", "anaconda-iso",
"--local",
bf_imgref
]
common.run_command_in_shell(build_args, dry_run, logfile, logfile)
common.record_junit(bf_path, "build-bootc-image", "OK")
except Exception:
common.record_junit(bf_path, "process-bootc-image", "FAILED")
# Propagate the exception to the caller
raise
finally:
Expand All @@ -257,20 +292,28 @@ def process_image_bootc(groupdir, bootcfile, dry_run):
os.rename(f"{bf_outdir}/bootiso/install.iso", bf_targetiso)


def process_group(groupdir, dry_run=False):
def process_group(groupdir, build_type, dry_run=False):
futures = []
# Parallel processing loop
with concurrent.futures.ProcessPoolExecutor() as executor:
# Scan group directory contents sorted by length and then alphabetically
for file in sorted(os.listdir(groupdir), key=lambda i: (len(i), i)):
if file.endswith(".containerfile"):
futures += [executor.submit(process_containerfile, groupdir, file, dry_run)]
elif file.endswith(".image-bootc"):
futures += [executor.submit(process_image_bootc, groupdir, file, dry_run)]
else:
common.print_msg(f"Skipping unknown file {file}")

try:
# Open the junit file
common.start_junit(groupdir)
# Parallel processing loop
with concurrent.futures.ProcessPoolExecutor() as executor:
# Scan group directory contents sorted by length and then alphabetically
for file in sorted(os.listdir(groupdir), key=lambda i: (len(i), i)):
if file.endswith(".containerfile"):
if build_type and build_type != "containerfile":
common.print_msg(f"Skipping '{file}' due to '{build_type}' filter")
continue
futures.append(executor.submit(process_containerfile, groupdir, file, dry_run))
elif file.endswith(".image-bootc"):
if build_type and build_type != "image-bootc":
common.print_msg(f"Skipping '{file}' due to '{build_type}' filter")
continue
futures.append(executor.submit(process_image_bootc, groupdir, file, dry_run))
else:
common.print_msg(f"Skipping unknown file {file}")

# Wait for the parallel tasks to complete
for f in concurrent.futures.as_completed(futures):
common.print_msg(f"Task {f} completed")
Expand All @@ -284,54 +327,76 @@ def process_group(groupdir, dry_run=False):
common.print_msg(f"Task {f} cancelled")
# Propagate the exception to the caller
raise
finally:
# Close junit file
common.close_junit()


def main():
    """Entry point: parse arguments, prepare repositories and build all
    requested image layers, always running cleanup on the way out."""
    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Build image layers using Bootc Image Builder and Podman.")
    parser.add_argument("-d", "--dry-run", action="store_true", help="Dry run: skip executing build commands.")
    parser.add_argument("-f", "--force-rebuild", action="store_true", help="Force rebuilding images that already exist.")
    parser.add_argument("-E", "--no-extract-images", action="store_true", help="Skip container image extraction.")
    parser.add_argument("-b", "--build-type", choices=["image-bootc", "containerfile"], help="Only build images of the specified type.")
    dirgroup = parser.add_mutually_exclusive_group(required=True)
    dirgroup.add_argument("-l", "--layer-dir", type=str, help="Path to the layer directory to process.")
    dirgroup.add_argument("-g", "--group-dir", type=str, help="Path to the group directory to process.")

    args = parser.parse_args()

    success_message = False
    try:
        # Convert input directories to absolute paths
        if args.group_dir:
            args.group_dir = os.path.abspath(args.group_dir)
            dir2process = args.group_dir
        if args.layer_dir:
            args.layer_dir = os.path.abspath(args.layer_dir)
            dir2process = args.layer_dir
        # Make sure the input directory exists
        if not os.path.isdir(dir2process):
            raise Exception(f"The input directory '{dir2process}' does not exist")
        # Make sure the local RPM repository exists
        if not os.path.isdir(LOCAL_REPO):
            raise Exception("Run create_local_repo.sh before building images")
        # Initialize force rebuild option
        global FORCE_REBUILD
        if args.force_rebuild:
            FORCE_REBUILD = True

        # Determine versions of RPM packages
        set_rpm_version_info_vars()
        # Prepare container image lists for mirroring registries
        common.delete_file(CONTAINER_LIST)
        if args.no_extract_images:
            common.print_msg("Skipping container image extraction")
        else:
            extract_container_images(SOURCE_VERSION, LOCAL_REPO, CONTAINER_LIST, args.dry_run)
            # The following images are specific to layers that use fake rpms built from source
            extract_container_images(f"4.{FAKE_NEXT_MINOR_VERSION}.*", NEXT_REPO, CONTAINER_LIST, args.dry_run)
            extract_container_images(PREVIOUS_RELEASE_VERSION, PREVIOUS_RELEASE_REPO, CONTAINER_LIST, args.dry_run)
            extract_container_images(YMINUS2_RELEASE_VERSION, YMINUS2_RELEASE_REPO, CONTAINER_LIST, args.dry_run)

        # Process individual group directory
        if args.group_dir:
            process_group(args.group_dir, args.build_type, args.dry_run)
        else:
            # Process layer directory contents sorted by length and then alphabetically
            for item in sorted(os.listdir(args.layer_dir), key=lambda i: (len(i), i)):
                item_path = os.path.join(args.layer_dir, item)
                # Check if this item is a directory
                if os.path.isdir(item_path):
                    process_group(item_path, args.build_type, args.dry_run)
        # Toggle the success flag
        success_message = True
    except Exception as e:
        common.print_msg(f"An error occurred: {e}")
        traceback.print_exc()
        sys.exit(1)
    finally:
        # Always terminate leftover subprocesses / containers
        cleanup_atexit(args.dry_run)
        # Exit status message
        common.print_msg("Build " + ("OK" if success_message else "FAILED"))


if __name__ == "__main__":
Expand Down
110 changes: 105 additions & 5 deletions test/bin/pyutils/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,73 @@

import os
import pathlib
import psutil
import sys
import subprocess
import time
import threading
from typing import List


PUSHD_DIR_STACK = []


def record_junit(groupdir, containerfile, filetype, status):
# Implement your recording logic here
pass
JUNIT_LOGFILE = None
JUNIT_LOCK = threading.Lock()


def start_junit(groupdir):
    """Start a junit report for a group: create a fresh log file and
    write the XML header carrying the group name and a timestamp."""
    global JUNIT_LOGFILE
    group_name = basename(groupdir)
    JUNIT_LOGFILE = os.path.join(
        get_env_var('IMAGEDIR'), "build-logs", group_name, "junit.xml")

    print_msg(f"Creating '{JUNIT_LOGFILE}'")
    # Make sure the log directory exists and start from an empty file
    create_dir(os.path.dirname(JUNIT_LOGFILE))
    delete_file(JUNIT_LOGFILE)

    # Write the XML declaration and the opening testsuite element
    header = (
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        f'<testsuite name="microshift-test-framework:{group_name}" '
        f'timestamp="{get_timestamp("%Y-%m-%dT%H:%M:%S")}">'
    )
    append_file(JUNIT_LOGFILE, header)


def close_junit():
    """Finish the junit report by writing the closing testsuite tag."""
    global JUNIT_LOGFILE
    if JUNIT_LOGFILE is None:
        raise Exception("Attempt to close junit without starting it first")
    # Terminate the testsuite element and forget the file path so a
    # stray record/close without a new start is detected
    append_file(JUNIT_LOGFILE, '</testsuite>')
    JUNIT_LOGFILE = None


def record_junit(object, step, status):
    """Append a testcase entry for the given object and step.

    status must be "OK", or start with "SKIP" or "FAIL"; any other value
    raises an exception.  Writes are serialized with a lock so this can
    be called from different threads.
    NOTE(review): JUNIT_LOCK is a threading lock; calls coming from
    separate worker processes are not covered by it - confirm callers.
    """
    # The 'with' statement releases the lock even when an exception
    # (e.g. an invalid status) is raised
    with JUNIT_LOCK:
        append_file(JUNIT_LOGFILE, f'<testcase classname="{object}" name="{step}">')
        # Add a message according to the status
        if status == "OK":
            pass
        elif status.startswith("SKIP"):
            append_file(JUNIT_LOGFILE, f'<skipped message="{status}" type="{step}-skipped" />')
        elif status.startswith("FAIL"):
            # Fixed: the type attribute was written as '${step}-failure'
            # with a stray literal '$', inconsistent with the skipped case
            append_file(JUNIT_LOGFILE, f'<failure message="{status}" type="{step}-failure" />')
        else:
            raise Exception(f"Invalid junit status '{status}'")
        # Close the test case block
        append_file(JUNIT_LOGFILE, '</testcase>')


def get_timestamp(format: str = "%H:%M:%S"):
Expand Down Expand Up @@ -105,6 +160,12 @@ def read_file(file_path: str):
return content


def append_file(file_path: str, content: str):
    """Append content to the file at file_path, creating it if missing."""
    with open(file_path, mode='a') as handle:
        handle.write(content)


def delete_file(file_path: str):
"""Attempt file deletion ignoring errors when a file does not exist"""
try:
Expand All @@ -116,3 +177,42 @@ def delete_file(file_path: str):
def basename(path: str):
    """Return the final component of the given path."""
    as_path = pathlib.Path(path)
    return as_path.name


def find_subprocesses(ppid=None):
    """Return a list with the PIDs of every descendant of 'ppid'.

    When 'ppid' is not given, the current process is used as the parent.
    """
    # Default to the current process when no parent PID was provided
    if not ppid:
        ppid = psutil.Process().pid
    # Walk the process tree recursively and collect the child PIDs
    return [child.pid for child in psutil.Process(ppid).children(recursive=True)]


def terminate_process(pid, wait=True):
    """Terminate the process with the given PID.

    Processes whose effective UID is root are sent SIGTERM through
    'sudo kill' because they cannot be signalled directly; others are
    terminated via psutil.  When 'wait' is True, block for up to 10
    seconds until the process exits and log if it does not.
    Non-existent processes are silently ignored; any other error
    propagates to the caller.
    """
    try:
        proc = psutil.Process(pid)
        # Check if the process runs elevated
        if proc.uids().effective == 0:
            run_command(["sudo", "kill", "-TERM", f"{pid}"], False)
        else:
            proc.terminate()
        if not wait:
            return

        # Wait for process to terminate
        try:
            proc.wait(timeout=10)
        except psutil.TimeoutExpired:
            print_msg(f"The {pid} PID did not exit after 10s")
    except psutil.NoSuchProcess:
        # Ignore non-existent processes
        pass
    # Note: the former 'except Exception: raise' clause was removed -
    # re-raising unchanged is what happens by default anyway