builddisks: Implement disk building
This is a bit raw, but functions.
This commit is contained in:
parent 7345f2b9f5
commit 0891d38975
@@ -2,6 +2,7 @@
    "comment": "This file defines the set of trees that is generated by the rpm-ostree autobuilder",
    "osname": "fedostree",
    "repo": "http://rpm-ostree.cloud.fedoraproject.org/repo",
    "architectures": ["x86_64"],

@@ -26,6 +26,7 @@ const GSystem = imports.gi.GSystem;
const Builtin = imports.builtin;
const ArgParse = imports.argparse;
const ProcUtil = imports.procutil;
const LibQA = imports.libqa;
const GuestFish = imports.guestfish;

const QaMakeDisk = new Lang.Class({

@@ -25,6 +25,10 @@ const Params = imports.params;
const ProcUtil = imports.procutil;
const GuestFish = imports.guestfish;

const BOOT_UUID = "fdcaea3b-2775-45ef-b441-b46a4a18e8c4";
const ROOT_UUID = "d230f7f0-99d3-4244-8bd9-665428054831";
const SWAP_UUID = "61f066e3-ac18-464e-bcc7-e7c3a623cec1";

const DEFAULT_GF_PARTITION_OPTS = ['-m', '/dev/sda3', '-m', '/dev/sda1:/boot'];

function linuxGetMemTotalMb() {

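Editorial note, not part of the commit: the -m options in DEFAULT_GF_PARTITION_OPTS are ordinary guestfish mount requests, annotated here for readers unfamiliar with the tool (the repeated constant exists only to carry the comments).

// guestfish interprets each "-m DEVICE[:MOUNTPOINT]" as a mount inside its appliance:
//   -m /dev/sda3        mounts the root partition at /
//   -m /dev/sda1:/boot  mounts the boot partition at /boot
const ANNOTATED_GF_PARTITION_OPTS = ['-m', '/dev/sda3', '-m', '/dev/sda1:/boot'];
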
@@ -114,26 +118,46 @@ blockdev-getss /dev/sda\n';
    let bootsizeSectors = bootsizeMb * 1024 / diskSectorsize * 1024;
    let swapsizeSectors = swapsizeMb * 1024 / diskSectorsize * 1024;
    let rootsizeSectors = diskBytesize / diskSectorsize - bootsizeSectors - swapsizeSectors - 64;
    print(Format.vprintf("boot: %s swap: %s root: %s", [bootsizeSectors, swapsizeSectors, rootsizeSectors]));
    let bootOffset = 64;
    let swapOffset = bootOffset + bootsizeSectors;
    let rootOffset = swapOffset + swapsizeSectors;
    let endOffset = rootOffset + rootsizeSectors;

    let syslinuxPaths = ['/usr/share/syslinux/mbr.bin', '/usr/lib/syslinux/mbr.bin'].map(function (a) { return Gio.File.new_for_path(a); });
    let syslinuxPath = null;
    for (let i = 0; i < syslinuxPaths.length; i++) {
        let path = syslinuxPaths[i];
        if (path.query_exists(null)) {
            syslinuxPath = path;
            break;
        }
    }
    if (syslinuxPath == null)
        throw new Error("Couldn't find syslinux mbr.bin in any of " + JSON.stringify(syslinuxPaths));

    let [,syslinuxData,] = syslinuxPath.load_contents(cancellable);
    let syslinuxQuotedData = "";
    for (let i = 0; i < syslinuxData.length; i++) {
        syslinuxQuotedData += ("\\x" + Format.vprintf("%02x", [syslinuxData[i]]));
    }

    let partconfig = Format.vprintf('launch\n\
part-add /dev/sda p %s %s\n\
part-add /dev/sda p %s %s\n\
part-add /dev/sda p %s %s\n\
part-set-bootable /dev/sda 1 true\n\
mkfs ext4 /dev/sda1\n\
set-e2label /dev/sda1 gnostree-boot\n\
mkswap-L gnostree-swap /dev/sda2\n\
set-e2uuid /dev/sda1 ' + BOOT_UUID + '\n\
mkswap-U ' + SWAP_UUID + ' /dev/sda2\n\
mkfs ext4 /dev/sda3\n\
set-e2label /dev/sda3 gnostree-root\n\
set-uuid /dev/sda3 ' + ROOT_UUID + '\n\
mount /dev/sda3 /\n\
mkdir /boot\n\
', [bootOffset, swapOffset - 1,
    swapOffset, rootOffset - 1,
    rootOffset, endOffset - 1]);
extlinux /boot\n',
    [bootOffset, swapOffset - 1,
     swapOffset, rootOffset - 1,
     rootOffset, endOffset - 1]);
    print("partition config: ", partconfig);
    gf.run(partconfig, cancellable);
}

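Editorial note, not part of the commit: the offsets above are plain sector arithmetic. A worked example with made-up numbers, assuming a 512-byte sector size and an 8 GiB disk:

// Hypothetical inputs; the real values come from the guestfish blockdev queries
// and the caller's boot/swap size choices.
let diskSectorsize = 512;
let diskBytesize = 8 * 1024 * 1024 * 1024;     // 8 GiB == 16777216 sectors
let bootsizeMb = 200;
let swapsizeMb = 512;

let bootsizeSectors = bootsizeMb * 1024 / diskSectorsize * 1024;   // 409600
let swapsizeSectors = swapsizeMb * 1024 / diskSectorsize * 1024;   // 1048576
let rootsizeSectors = diskBytesize / diskSectorsize - bootsizeSectors - swapsizeSectors - 64;  // 15318976

let bootOffset = 64;                            // first 64 sectors reserved for the MBR/alignment
let swapOffset = bootOffset + bootsizeSectors;  // 409664
let rootOffset = swapOffset + swapsizeSectors;  // 1458240
let endOffset = rootOffset + rootsizeSectors;   // 16777216 (end of disk)
// The three part-add lines are then filled with [bootOffset, swapOffset-1],
// [swapOffset, rootOffset-1] and [rootOffset, endOffset-1] as start/end sectors.
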
@@ -362,46 +386,3 @@ LABEL=gnostree-swap swap swap defaults 0 0\n';
    let fstabPath = ostreeOsdir.resolve_relative_path('current/etc/fstab');
    fstabPath.replace_contents(defaultFstab, null, false, Gio.FileCreateFlags.REPLACE_DESTINATION, cancellable);
};

function bootloaderInstall(diskpath, workdir, osname, cancellable) {
    let qemuArgs = getDefaultQemuOptions();

    let tmpKernelPath = workdir.get_child('kernel.img');
    let tmpInitrdPath = workdir.get_child('initrd.img');

    let [gfmnt, mntdir] = newReadWriteMount(diskpath, cancellable);
    let ostreeArg;
    try {
        let [kernelPath, initrdPath] = _findCurrentKernel(mntdir, osname, cancellable)
        ostreeArg = _findCurrentOstreeBootArg(mntdir, cancellable);

        // Copy
        kernelPath.copy(tmpKernelPath, 0, cancellable, null, null);
        initrdPath.copy(tmpInitrdPath, 0, cancellable, null, null);
    } finally {
        gfmnt.umount(cancellable);
    }

    let consoleOutput = workdir.get_child('bootloader-console.out');

    let kernelArgv = ['console=ttyS0', 'panic=1', 'root=LABEL=gnostree-root', 'rw', ostreeArg,
                      'systemd.journald.forward_to_console=true',
                      'systemd.unit=gnome-ostree-install-bootloader.target'];

    qemuArgs.push.apply(qemuArgs, ['-drive', 'file=' + diskpath.get_path() + ',if=virtio',
                                   '-vnc', 'none',
                                   '-no-reboot',
                                   '-serial', 'file:' + consoleOutput.get_path(),
                                   '-chardev', 'socket,id=charmonitor,path=qemu.monitor,server,nowait',
                                   '-mon', 'chardev=charmonitor,id=monitor,mode=control',
                                   '-kernel', tmpKernelPath.get_path(),
                                   '-initrd', tmpInitrdPath.get_path(),
                                   '-append', kernelArgv.join(' ')
                                  ]);

    ProcUtil.runSync(qemuArgs, cancellable, { cwd: workdir.get_path(),
                                              logInitiation: true });

    tmpKernelPath.delete(cancellable);
    tmpInitrdPath.delete(cancellable);
}

@@ -44,6 +44,10 @@ const TaskBuild = new Lang.Class({

    DefaultParameters: {onlyTreesMatching: null},

    BuildState: { 'failed': 'failed',
                  'successful': 'successful',
                  'unchanged': 'unchanged' },

    _composeProduct: function(ref, productName, treeName, treeData, release, architecture, cancellable) {
        let repos = ['fedora-' + release];
        if (release != 'rawhide')

@@ -57,7 +61,11 @@ const TaskBuild = new Lang.Class({
        let baseRequired = this._productData['base_required_packages'];
        packages.push.apply(packages, baseRequired);

        print("Starting build of " + ref);
        let [,origRevision] = this.ostreeRepo.resolve_rev(ref, true);
        if (origRevision == null)
            print("Starting new build of " + ref);
        else
            print("Starting build of " + ref + " previous: " + origRevision);

        let argv = ['rpm-ostree',
                    '--workdir=' + this.workdir.get_path()];

@@ -78,9 +86,12 @@ const TaskBuild = new Lang.Class({
            proc.wait_sync_check(cancellable);
        } catch (e) {
            print("Build of " + productName + " failed");
            return false;
            return this.BuildState.failed;
        }
        return true;
        let [,newRevision] = this.ostreeRepo.resolve_rev(ref, false);
        if (origRevision == newRevision)
            return this.BuildState.unchanged;
        return this.BuildState.successful;
    },

    execute: function(cancellable) {

@@ -93,6 +104,7 @@ const TaskBuild = new Lang.Class({
        let products = productData['products'];
        let successful = [];
        let failed = [];
        let unchanged = [];
        for (let i = 0; i < releases.length; i++) {
            for (let j = 0; j < architectures.length; j++) {
                for (let productName in products) {

@@ -105,17 +117,36 @@ const TaskBuild = new Lang.Class({
                            log("Skipping " + ref + " which does not match " + this.parameters.onlyTreesMatching);
                            continue;
                        }
                        if (this._composeProduct(ref, productName, treeName, products[productName][treeName],
                                                 release, architecture,
                                                 cancellable))
                            successful.push(ref);
                        else
                            failed.push(ref);
                        let result = this._composeProduct(ref, productName, treeName, products[productName][treeName],
                                                          release, architecture,
                                                          cancellable);
                        switch (result) {
                            case this.BuildState.successful: {
                                successful.push(ref);
                            }
                            break;
                            case this.BuildState.failed: {
                                failed.push(ref);
                            }
                            break;
                            case this.BuildState.unchanged: {
                                unchanged.push(ref);
                            }
                            break;
                            default:
                                throw new Error("Invalid result from composeProduct: " + result);
                        }
                    }
                }
            }
        }
        let productsBuilt = { successful: successful,
                              failed: failed,
                              unchanged: unchanged };
        let productsBuiltPath = this.builddir.get_child('products-built.json');
        JsonUtil.writeJsonFileAtomic(productsBuiltPath, productsBuilt, cancellable);
        print("Successful: " + successful.join(' '));
        print("Failed: " + failed.join(' '));
        print("Unchanged: " + unchanged.join(' '));
    }
});

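Editorial note, not part of the commit: products-built.json written above is the handoff to the new builddisks task below. An illustrative payload, with invented ref names:

// Shape only; the refs are made up for illustration.
let exampleProductsBuilt = {
    "successful": ["fedostree/20/x86_64/workstation/gnome/core"],
    "failed": [],
    "unchanged": ["fedostree/20/x86_64/server/docker-io"]
};
// builddisks reads it back via JsonUtil.loadJson(this.builddir.get_child('products-built.json'), cancellable)
// and only builds disks for the 'successful' refs.
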
@@ -44,107 +44,46 @@ const TaskBuildDisks = new Lang.Class({
        TaskAfter: ['build'],
    },

    _imageSubdir: 'images',
    _inheritPreviousDisk: true,
    _onlyTreeSuffixes: ['-runtime', '-devel-debug'],

    _buildDiskForProduct: function(ref, cancellable) {
        let osname = this._products['osname'];
        let originRepoUrl = this._products['repo'];

        let [,revision] = this.ostreeRepo.resolve_rev(ref, false);
        let refUnix = ref.replace(/\//g, '-');
        let diskName = refUnix + '-' + this._buildName + '.qcow2';
        let diskPath = this._imageCacheDir.get_child(diskName);
        if (!diskPath.query_exists(null))
            LibQA.createDisk(diskPath, cancellable);
        let mntdir = Gio.File.new_for_path('mnt');
        GSystem.file_ensure_directory(mntdir, true, cancellable);
        let gfmnt = new GuestFish.GuestMount(diskPath, { partitionOpts: LibQA.DEFAULT_GF_PARTITION_OPTS,
                                                         readWrite: true });
        gfmnt.mount(mntdir, cancellable);
        try {
            LibQA.pullDeploy(mntdir, this.repo, osname, ref, revision, originRepoUrl,
                             cancellable);
        } finally {
            gfmnt.umount(cancellable);
        }

        print("Successfully updated " + diskPath.get_path() + " to " + revision);

        this._postDiskCreation(refUnix, diskPath, cancellable);
    },

    execute: function(cancellable) {
        let isLocal = this._buildName == 'local';
        let baseImageDir = this.workdir.resolve_relative_path(this._imageSubdir);
        GSystem.file_ensure_directory(baseImageDir, true, cancellable);
        this._imageCacheDir = this.cachedir.get_child('images');
        GSystem.file_ensure_directory(this._imageCacheDir, true, cancellable);

        let currentImageLink = baseImageDir.get_child('current');

        let targetImageDir = baseImageDir.get_child(this._buildName);
        if (!isLocal && targetImageDir.query_exists(null)) {
            print("Already created " + targetImageDir.get_path());
            return;
        this._products = JsonUtil.loadJson(this.workdir.get_child('products.json'), cancellable);
        let productsBuilt = JsonUtil.loadJson(this.builddir.get_child('products-built.json'), cancellable);
        let productsBuiltSuccessful = productsBuilt['successful'];
        print("Preparing to update disks for " + JSON.stringify(productsBuiltSuccessful));
        for (let i = 0; i < productsBuiltSuccessful.length; i++) {
            this._buildDiskForProduct(productsBuiltSuccessful[i], cancellable);
        }

        print("Creating image for buildName=" + this._buildName);

        let buildDataPath = this.builddir.get_child('build.json');
        let buildData = JsonUtil.loadJson(buildDataPath, cancellable);

        let workImageDir = Gio.File.new_for_path('images');
        GSystem.file_ensure_directory(workImageDir, true, cancellable);

        let destPath = workImageDir.get_child('build-' + this._buildName + '.json');
        GSystem.shutil_rm_rf(destPath, cancellable);
        GSystem.file_linkcopy(buildDataPath, destPath, Gio.FileCopyFlags.ALL_METADATA, cancellable);

        let targets = buildData['targets'];

        let osname = buildData['snapshot']['osname'];
        let originRepoUrl = buildData['snapshot']['repo'];

        for (let targetName in targets) {
            let matched = false;
            for (let i = 0; i < this._onlyTreeSuffixes.length; i++) {
                if (JSUtil.stringEndswith(targetName, this._onlyTreeSuffixes[i])) {
                    matched = true;
                    break;
                }
            }
            if (!matched)
                continue;
            let targetRevision = buildData['targets'][targetName];
            let squashedName = osname + '-' + targetName.substr(targetName.lastIndexOf('/') + 1);
            let diskName = squashedName + '.qcow2';
            let diskPath = workImageDir.get_child(diskName);
            let prevPath = currentImageLink.get_child(diskName);
            let prevExists = prevPath.query_exists(null);
            GSystem.shutil_rm_rf(diskPath, cancellable);
            let doCloneDisk = this._inheritPreviousDisk && prevExists;
            if (doCloneDisk) {
                LibQA.copyDisk(prevPath, diskPath, cancellable);
            } else {
                LibQA.createDisk(diskPath, cancellable);
            }
            let mntdir = Gio.File.new_for_path('mnt-' + squashedName);
            GSystem.file_ensure_directory(mntdir, true, cancellable);
            let gfmnt = new GuestFish.GuestMount(diskPath, { partitionOpts: LibQA.DEFAULT_GF_PARTITION_OPTS,
                                                             readWrite: true });
            gfmnt.mount(mntdir, cancellable);
            try {
                LibQA.pullDeploy(mntdir, this.repo, osname, targetName, targetRevision, originRepoUrl,
                                 cancellable);
            } finally {
                gfmnt.umount(cancellable);
            }
            // Assume previous disks have successfully installed a bootloader
            if (!doCloneDisk) {
                LibQA.bootloaderInstall(diskPath, Gio.File.new_for_path('.'), osname, cancellable);
                print("Bootloader installation complete");
            }

            this._postDiskCreation(squashedName, diskPath, cancellable);
            print("post-disk creation complete");
        }

        if (isLocal) {
            let localImageDir = baseImageDir.get_child('local');
            GSystem.shutil_rm_rf(localImageDir, cancellable);
            GSystem.file_rename(workImageDir, localImageDir, cancellable);
            return;
        }

        GSystem.file_rename(workImageDir, targetImageDir, cancellable);

        let currentInfo = null;
        let oldCurrent = null;
        try {
            currentInfo = currentImageLink.query_info('standard::symlink-target', Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS, cancellable);
        } catch (e) {
            if (!e.matches(Gio.IOErrorEnum, Gio.IOErrorEnum.NOT_FOUND))
                throw e;
        }
        if (currentInfo) {
            oldCurrent = currentImageLink.get_parent().resolve_relative_path(currentInfo.get_symlink_target());
        }
        BuildUtil.atomicSymlinkSwap(baseImageDir.get_child('current'), targetImageDir, cancellable);
        if (!isLocal && oldCurrent)
            GSystem.shutil_rm_rf(oldCurrent, cancellable);
    },

    _postDiskCreation: function(squashedName, diskPath, cancellable) {

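Editorial note, not part of the commit: a small illustration of the disk-naming scheme used in the execute() loop above, with a hypothetical target name.

// Hypothetical target; real names come from buildData['targets'].
let osname = 'fedostree';
let targetName = 'fedostree/buildmaster/x86_64-runtime';
let squashedName = osname + '-' + targetName.substr(targetName.lastIndexOf('/') + 1);
// squashedName == 'fedostree-x86_64-runtime'
// diskName     == 'fedostree-x86_64-runtime.qcow2'
// mntdir       == 'mnt-fedostree-x86_64-runtime'
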
@@ -41,11 +41,10 @@ const TaskSmoketest = new Lang.Class({
        TaskAfter: ['builddisks'],
    },

    RequiredMessageIDs: ["0ce153587afa4095832d233c17a88001" // gnome-session startup ok
    RequiredMessageIDs: ["39f53479d3a045ac8e11786248231fbf" // multi-user.target
                        ],

    FailedMessageIDs: ["fc2e22bc6ee647b6b90729ab34a250b1", // coredump
                       "10dd2dc188b54a5e98970f56499d1f73" // gnome-session required component failed
                      ],

    CompletedTag: 'smoketested'

@@ -27,17 +27,16 @@ const TaskZDisks = new Lang.Class({

    TaskDef: {
        TaskName: "zdisks",
        TaskAfter: ['smoketest'],
        TaskAfter: ['build'],
        TaskScheduleMinSecs: 3*60*60, // Only do this every 3 hours
    },

    _imageSubdir: 'images/z',
    _imageSubdir: 'images',
    _inheritPreviousDisk: false,
    _onlyTreeSuffixes: ['-runtime', '-devel-debug'],

    _postDiskCreation: function(squashedName, diskPath, cancellable) {
    _postDiskCreation: function(unixName, diskPath, cancellable) {
        let parent = diskPath.get_parent();
        let outPath = parent.get_child(squashedName + '-' + this._buildName + '.qcow2.gz');
        let outPath = parent.get_child(diskPath.get_name() + '.gz');
        let outStream = outPath.create(Gio.FileCreateFlags.REPLACE_DESTINATION, cancellable);
        let compressor = Gio.ZlibCompressor.new(Gio.ZlibCompressorFormat.GZIP, 7);
        let outConverter = Gio.ConverterOutputStream.new(outStream, compressor);

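Editorial note, not part of the commit: the hunk is cut off right after the gzip converter stream is created. A sketch of a typical continuation, under the assumption that plain GIO splicing is used to finish the compression:

// Sketch only: stream the qcow2 image through the gzip converter and close both ends.
let inStream = diskPath.read(cancellable);
outConverter.splice(inStream,
                    Gio.OutputStreamSpliceFlags.CLOSE_SOURCE | Gio.OutputStreamSpliceFlags.CLOSE_TARGET,
                    cancellable);
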
@@ -578,43 +578,20 @@ const TestBase = new Lang.Class({
    },

    execute: function(cancellable) {
        let imageDir = this.workdir.get_child('images');
        let currentImages = imageDir.get_child('current');

        let e = currentImages.enumerate_children('standard::*', Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS,
                                                 cancellable);
        let imageCacheDir = this.cachedir.get_child('images');
        let info;
        let buildJson;
        let disksToTest = [];

        while ((info = e.next_file(cancellable)) != null) {
            let name = info.get_name();
            if (name.indexOf('build-') == 0 && JSUtil.stringEndswith(name, '.json')) {
                buildJson = e.get_child(info);
                continue;
            }
            if (!JSUtil.stringEndswith(name, '.qcow2'))
                continue;
            let matches = false;
            for (let i = 0; i < this.TestTrees.length; i++) {
                let tree = this.TestTrees[i];
                if (JSUtil.stringEndswith(name, tree + '.qcow2')) {
                    matches = true;
                    break;
                }
            }
            if (!matches) {
                print("Skipping disk " + name + " not in " + JSON.stringify(this.TestTrees));
                continue;
            }
            disksToTest.push(name);
        }
        e.close(null);
        if (disksToTest.length == 0)
            throw new Error("Didn't find any matching .qcow2 disks in " + currentImages.get_path());
        this._buildData = null;
        if (buildJson != null)
            this._buildData = JSONUtil.loadJson(buildJson, cancellable);
            throw new Error("Didn't find any matching .qcow2 disks in " + imageCacheDir.get_path());
        for (let i = 0; i < disksToTest.length; i++) {
            let name = disksToTest[i];
            let workdirName = 'work-' + name.replace(/\.qcow2$/, '');
@@ -624,7 +601,7 @@ const TestBase = new Lang.Class({
                                      this.BaseRequiredMessageIDs.concat(this.RequiredMessageIDs),
                                      this.BaseFailedMessageIDs.concat(this.FailedMessageIDs),
                                      this.StatusMessageID);
            test.execute(subworkdir, this._buildData, this.repo, currentImages.get_child(name), cancellable);
            test.execute(subworkdir, this.repo, currentImages.get_child(name), cancellable);
        }

        let buildData = this._buildData;