84 lines
2.6 KiB
JavaScript
84 lines
2.6 KiB
JavaScript
// TODO: sandbox this process (seccomp? minijail?)
|
|
// TODO: sandbox mksquashfs
|
|
// TODO: look at how firecracker-containerd does it
|
|
// TODO: look at how [ignite](https://github.com/weaveworks/ignite) does it
|
|
// TODO: look at how [thi-startup/init](https://github.com/thi-startup/init) does it
|
|
// TODO: look at how [firebuild](https://combust-labs.github.io/firebuild-docs/) does it
|
|
|
|
import gunzip from "gunzip-maybe";
|
|
import { spawn } from "node:child_process";
|
|
import { Duplex, Writable } from "node:stream";
|
|
import { ReadableStream } from "node:stream/web";
|
|
import { extract, pack } from "tar-stream";
|
|
|
|
/**
 * Takes many tar streams and converts them to a squashfs image.
 *
 * ## Why?
 *
 * We need a way to download OCI images (composed of layers that are tarballs) to something
 * that can be accessed inside a VM. I wanted to not need to copy the image each time a VM
 * is made. So, having a local HTTP cache and having the agent inside the VM download it into
 * a temporary filesystem would copy it. This is bad for the life of an SSD, slow for an HDD,
 * and too much to save into RAM for each VM.
 *
 * Instead, we download the images before booting the VM and package the layers up into a
 * squashfs image that can be mounted inside the VM. This way, we reuse downloaded images
 * efficiently.
 *
 * @param {Promise<ReadableStream>[]} streams - layer tarballs (optionally gzipped), base layer first
 * @param {string} output - path of the squashfs image file to create
 * @param {{ content: string, headers: import("tar-stream").Headers }[]} extraFiles - files appended after the layers
 * @returns {Promise<void>} resolves once mksquashfs exits successfully
 * @throws {Error} if mksquashfs cannot be spawned, exits non-zero / is killed,
 *   or a layer stream fails to decompress
 */
export async function tar2squashfs(streams, output, extraFiles) {
  const child = spawn(
    "mksquashfs",
    [
      "-", // read the tar archive from stdin
      output,
      "-tar",
      ...["-comp", "zstd"],
      ...["-Xcompression-level", "3"],
    ],
    {
      stdio: ["pipe", "inherit", "inherit"],
    }
  );

  // Register exit handlers BEFORE feeding any data: on a spawn failure
  // (e.g. mksquashfs not installed) only 'error' fires, and the original
  // code — which waited solely on 'close' — hung forever.
  const childDone = new Promise((resolve, reject) => {
    child.on("error", (err) => {
      reject(new Error("failed to spawn mksquashfs", { cause: err }));
    });
    child.on("close", (code, signal) => {
      if (code === 0) {
        resolve(undefined);
      } else {
        // Reject with an Error, not the bare exit code: `code` is null when
        // the child was killed by a signal, and Errors carry a stack trace.
        reject(new Error(`mksquashfs failed (code ${code}, signal ${signal})`));
      }
    });
  });

  // If mksquashfs dies early, writes to its stdin fail with EPIPE; log it
  // instead of crashing — `childDone` reports the real failure to the caller.
  child.stdin.on("error", console.error);

  const p = pack();
  p.pipe(child.stdin);
  p.on("error", console.error);

  // We reverse the arrays because mksquashfs ignores files if they already exist,
  // so we leave the last layers first so they are the ones used instead of the last ones
  for (const streamP of [...streams].reverse()) {
    const stream = await streamP;
    const ex = extract();

    // Re-pack every entry of this layer into the combined tar; `next` is the
    // callback tar-stream invokes once the entry has been fully written.
    // Renamed the parameter from `stream` so it no longer shadows the outer
    // layer stream.
    ex.on("entry", (header, entryStream, next) => {
      entryStream.pipe(p.entry(header, next));
    });

    // gunzip-maybe passes already-uncompressed data through unchanged.
    // Awaiting pipeTo — instead of leaving it floating and waiting on ex's
    // "finish" event — both waits for the extractor to drain and surfaces
    // decompression errors as a rejection of this function (previously an
    // unhandled rejection followed by a hang on a "finish" that never came).
    await stream.pipeThrough(Duplex.toWeb(gunzip())).pipeTo(Writable.toWeb(ex));
  }

  // Extra files go last; since mksquashfs keeps the first occurrence of a
  // path (see the reversal note above), they only apply where no layer
  // already provided the file.
  for (const { headers, content } of extraFiles) {
    p.entry(headers, content);
  }

  p.finalize();

  await childDone;
}
|