Cat /dev/Nulo 2023-01-16 12:14:20 -03:00
parent 2ae3259171
commit 8d4e6db7d2
13 changed files with 515 additions and 37 deletions

1
.gitignore vendored
@@ -1,2 +1,3 @@
node_modules/
build-javascript
+cache/

134
alpine.ts Normal file
@@ -0,0 +1,134 @@
import {
chmod,
copyFile,
mkdir,
mkdtemp,
opendir,
rm,
rmdir,
symlink,
writeFile,
} from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { cwd } from "node:process";
import { execFile } from "./helpers.js";
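// Thin wrapper around an Alpine rootfs being assembled inside a host directory.
// All paths passed to the methods are rootfs-relative; writes that must land in
// root-owned locations go through the sudo* variants below.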
export class Alpine {
dir: string;
private constructor({ dir }: { dir: string }) {
this.dir = dir;
}
async mkdir(dir: string, opts?: { recursive: boolean }): Promise<void> {
await mkdir(path.join(this.dir, dir), opts);
}
async writeFile(filePath: string, content: string): Promise<void> {
await this.mkdir(path.dirname(filePath), { recursive: true });
await writeFile(path.join(this.dir, filePath), content);
}
async writeExecutable(filePath: string, content: string): Promise<void> {
await this.writeFile(filePath, content);
await chmod(path.join(this.dir, filePath), 0o700); // octal mode (decimal 700 would set the wrong bits)
}
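// Like writeExecutable, but for paths that end up owned by root (e.g. once apk
// has populated the rootfs): stage the file in a host tmpdir, then move it into
// place and chmod it via sudo.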
async sudoWriteExecutable(filePath: string, content: string): Promise<void> {
const dir = await mkdtemp(
path.join(tmpdir(), "define-alpine-sudoWriteExecutable-")
);
try {
const tmpFile = path.join(dir, path.basename(filePath));
const finalPath = path.join(this.dir, filePath);
await writeFile(tmpFile, content);
await execFile("sudo", [
"mkdir",
"--parents",
path.join(this.dir, path.dirname(filePath)),
]);
await execFile("sudo", ["mv", tmpFile, finalPath]);
await execFile("sudo", ["chmod", "700", finalPath]);
} finally {
await rm(dir, { recursive: true, force: true });
}
}
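// Symlink targets are stored relative to the link itself; an absolute target
// would point at the temporary host path and break once the rootfs is entered
// with chroot or moved elsewhere.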
private getRelativeSymlink(
target: string,
filePath: string
): { target: string; filePath: string } {
const realFilePath = path.join(this.dir, filePath);
return {
target: path.relative(
path.dirname(realFilePath),
path.join(this.dir, target)
),
filePath: realFilePath,
};
}
async symlink(_target: string, _filePath: string): Promise<void> {
const { target, filePath } = this.getRelativeSymlink(_target, _filePath);
await symlink(target, filePath);
}
async sudoSymlink(_target: string, _filePath: string): Promise<void> {
const { target, filePath } = this.getRelativeSymlink(_target, _filePath);
await execFile("sudo", ["ln", "-s", target, filePath]);
}
async addPackages(packages: string[]): Promise<void> {
await execFile("sudo", [
"apk",
"add",
"--clean-protected",
"--root",
this.dir,
...packages,
]);
}
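// Bootstrap a minimal Alpine system in `dir`: seed /etc/apk (package cache,
// signing keys, repositories), then install the base packages with
// `apk add --initdb`.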
static async makeWorld({
dir,
packages,
}: {
dir: string;
packages?: string[];
}): Promise<Alpine> {
const apkDir = path.join(dir, "/etc/apk");
await mkdir(apkDir, { recursive: true });
// hack: point /etc/apk/cache at a cache/ directory in the project so apk reuses
// downloaded packages across runs (the absolute symlink only resolves on the host)
{
const cacheDir = path.join(cwd(), "cache");
await mkdir("cache", { recursive: true });
await symlink(cacheDir, path.join(apkDir, "cache"));
}
{
const apkKeysDir = path.join(apkDir, "keys");
const keysSrcDir = "alpine/keys";
await mkdir(apkKeysDir);
for await (const { name } of await opendir(keysSrcDir))
await copyFile(
path.join(keysSrcDir, name),
path.join(apkKeysDir, name)
);
}
await writeFile(
path.join(apkDir, "repositories"),
[
"https://dl-cdn.alpinelinux.org/alpine/v3.17/main",
"https://dl-cdn.alpinelinux.org/alpine/v3.17/community",
].join("\n")
);
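// apk runs under sudo so files inside the new rootfs get the right ownership
// and device nodes can be created.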
await execFile("sudo", [
"apk",
"add",
"--initdb",
"--clean-protected",
"--root",
dir,
...["alpine-baselayout", "busybox", "libc-utils", "alpine-keys"],
...(packages || []),
]);
return new Alpine({ dir });
}
}

8
helpers.ts Normal file
@@ -0,0 +1,8 @@
import { promisify } from "node:util";
import {
execFile as execFileCallback,
spawn as spawnCallback,
} from "node:child_process";
export const execFile = promisify(execFileCallback);
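// Caveat: unlike execFile, spawn has no custom util.promisify implementation,
// so the promise returned by this wrapper settles only via a trailing callback
// that spawn never invokes.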
export const spawn = promisify(spawnCallback);

index.ts
@@ -1,49 +1,50 @@
-import {
-execFile as execFileCallback,
-spawn as spawnCallback,
-} from "node:child_process";
-import { copyFile, mkdir, mkdtemp, opendir, writeFile } from "node:fs/promises";
+import { mkdir, mkdtemp } from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
-import { promisify } from "node:util";
-const execFile = promisify(execFileCallback);
-const spawn = promisify(spawnCallback);
+import { Alpine } from "./alpine.js";
+import { spawn } from "./helpers.js";
+import { Runit } from "./runit/index.js";
{
const rootfsDir = await mkdtemp(path.join(tmpdir(), "define-alpine-"));
console.debug(rootfsDir);
-await makeWorld(rootfsDir);
+const alpine = await Alpine.makeWorld({ dir: rootfsDir });
+const runit = await Runit.setup(alpine);
+// await makeService({
+// parentDir: rootfsDir,
+// name: "grafana",
+// packages: ["grafana"],
+// setup: async (dir) => {},
+// initScript: async (dir) => {},
+// });
try {
await spawn("sudo", ["chroot", rootfsDir], { stdio: "inherit" });
} catch {}
}
-async function makeWorld(dir: string): Promise<void> {
-const apkDir = path.join(dir, "/etc/apk");
-await mkdir(apkDir, { recursive: true });
+// interface Service {}
+// async function makeService({
+// parentDir,
+// name,
+// packages,
+// setup,
+// initScript: _initScript,
+// }: {
+// parentDir: string;
+// name: string;
+// packages?: string[];
+// setup: (dir: string) => Promise<void>;
+// initScript: (dir: string) => Promise<string>;
+// }) {
+// const rootsDir = path.join(parentDir, "/nulo/roots/");
+// await mkdir(rootsDir, { recursive: true });
-{
-const apkKeysDir = path.join(apkDir, "keys");
-const keysSrcDir = "alpine/keys";
-await mkdir(apkKeysDir);
-for await (const { name } of await opendir(keysSrcDir))
-await copyFile(path.join(keysSrcDir, name), path.join(apkKeysDir, name));
-}
+// const alpine = await Alpine.makeWorld({
+// dir: path.join(rootsDir, name),
+// packages,
+// });
-await writeFile(
-path.join(apkDir, "repositories"),
-[
-"https://dl-cdn.alpinelinux.org/alpine/v3.17/main",
-"https://dl-cdn.alpinelinux.org/alpine/v3.17/community",
-].join("\n")
-);
-await execFile("sudo", [
-"apk",
-"add",
-"--initdb",
-"--clean-protected",
-"--root",
-dir,
-...["alpine-baselayout", "busybox", "libc-utils", "alpine-keys"],
-]);
-}
+// await setup(alpine.dir);
+// // const initScript = await _initScript(rootfsDir);
+// }

package.json
@@ -5,7 +5,7 @@
"description": "",
"main": "index.js",
"scripts": {
"run": "esbuild --target=node18 --sourcemap --outdir=build-javascript --outbase=. *.ts && node --enable-source-maps build-javascript/index.js"
"run": "esbuild --target=node18 --sourcemap --outdir=build-javascript --outbase=. *.ts **/*.ts && node --enable-source-maps build-javascript/index.js"
},
"keywords": [],
"author": "",

123
runit/index.ts Normal file
@@ -0,0 +1,123 @@
import { readFile } from "fs/promises";
import path from "path";
import { Alpine } from "../alpine.js";
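// Sets up a runit-based init inside the Alpine rootfs: enabled services live in
// /etc/runit/runsvdir/default (reachable through the current -> /etc/service
// symlink chain), early boot is handled by the core-services scripts, and each
// service gets its own directory under /etc/sv/<name>.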
export class Runit {
alpine: Alpine;
private constructor({ alpine }: { alpine: Alpine }) {
this.alpine = alpine;
}
private static async getScript(name: string): Promise<string> {
return await readFile(path.join("runit/scripts", name), "utf8");
}
static async setup(alpine: Alpine): Promise<Runit> {
await alpine.mkdir("/etc/runit/runsvdir/default", { recursive: true });
await alpine.symlink(
"/etc/runit/runsvdir/default",
"/etc/runit/runsvdir/current"
);
await alpine.symlink("/etc/runit/runsvdir/current", "/etc/service");
await alpine.symlink("/run/runit/stopit", "/etc/runit/stopit");
await alpine.symlink("/run/runit/reboot", "/etc/runit/reboot");
// These scripts were lifted from Void Linux
await alpine.writeExecutable(
"/etc/runit/functions",
await this.getScript("functions")
);
await alpine.writeExecutable(
"/etc/runit/core-services/00-pseudofs.sh",
await this.getScript("00-pseudofs.sh")
);
await alpine.writeExecutable(
"/etc/runit/core-services/01-static-devnodes.sh",
await this.getScript("01-static-devnodes.sh")
);
await alpine.writeExecutable(
"/etc/runit/core-services/02-udev.sh",
await this.getScript("02-udev.sh")
);
await alpine.writeExecutable(
"/etc/runit/core-services/03-filesystems.sh",
await this.getScript("03-filesystems.sh")
);
await alpine.writeExecutable(
"/etc/runit/core-services/04-swap.sh",
await this.getScript("04-swap.sh")
);
await alpine.writeExecutable(
"/etc/runit/core-services/05-misc.sh",
await this.getScript("05-misc.sh")
);
// https://wiki.gentoo.org/wiki/Runit#Reboot_and_shutdown
await alpine.sudoWriteExecutable(
"/usr/local/sbin/rpoweroff",
`#!/bin/sh
runit-init 0`
);
await alpine.sudoWriteExecutable(
"/usr/local/sbin/rreboot",
`#!/bin/sh
runit-init 6`
);
await alpine.addPackages(["runit", "eudev"]);
const runit = new Runit({ alpine });
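// Basic consoles on tty1, tty2 and the serial port; chpst -P runs each getty in
// its own process group.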
await runit.addService(
"getty-tty1",
`#!/bin/sh
exec chpst -P getty 38400 tty1 linux`,
false
);
await runit.addService(
"getty-tty2",
`#!/bin/sh
exec chpst -P getty 38400 tty2 linux`,
false
);
await runit.addService(
"getty-ttyS0",
`#!/bin/sh
exec chpst -P getty 38400 ttyS0 linux`,
false
);
return runit;
}
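// Write /etc/sv/<name>/run (plus an optional log/run that forwards output to
// syslog via logger), then enable the service by linking it into the default
// runsvdir. The supervise directories are redirected into /run so runsv state
// lands on a writable tmpfs.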
async addService(
name: string,
script: string,
log: boolean = true
): Promise<void> {
const runScriptPath = path.join("/etc/sv/", name, "/run");
await this.alpine.sudoWriteExecutable(runScriptPath, script);
if (log) {
const logScriptPath = path.join("/etc/sv/", name, "/log/run");
await this.alpine.sudoWriteExecutable(
logScriptPath,
`#!/bin/sh
exec logger -p daemon.info -t '${name}'`
);
// /etc/sv/<name> is root-owned at this point, so this link needs sudo as well
await this.alpine.sudoSymlink(
`/run/runit/supervise.${name}.log`,
path.join("/etc/sv/", name, "/log/supervise")
);
}
// Enable the service
await this.alpine.sudoSymlink(
path.join("/etc/sv/", name),
path.join("/etc/runit/runsvdir/default/", name)
);
await this.alpine.sudoSymlink(
`/run/runit/supervise.${name}`,
path.join("/etc/sv/", name, "/supervise")
);
}
}

48
runit/scripts/00-pseudofs.sh Normal file
@@ -0,0 +1,48 @@
msg "Mounting pseudo-filesystems..."
mountpoint -q /proc || mount -o nosuid,noexec,nodev -t proc proc /proc
mountpoint -q /sys || mount -o nosuid,noexec,nodev -t sysfs sys /sys
mountpoint -q /run || mount -o mode=0755,nosuid,nodev -t tmpfs run /run
mountpoint -q /dev || mount -o mode=0755,nosuid -t devtmpfs dev /dev
mkdir -p -m0755 /run/runit /run/lvm /run/user /run/lock /run/log /dev/pts /dev/shm
mountpoint -q /dev/pts || mount -o mode=0620,gid=5,nosuid,noexec -n -t devpts devpts /dev/pts
mountpoint -q /dev/shm || mount -o mode=1777,nosuid,nodev -n -t tmpfs shm /dev/shm
mountpoint -q /sys/kernel/security || mount -n -t securityfs securityfs /sys/kernel/security
if [ -d /sys/firmware/efi/efivars ]; then
mountpoint -q /sys/firmware/efi/efivars || mount -o nosuid,noexec,nodev -t efivarfs efivarfs /sys/firmware/efi/efivars
fi
if [ -z "$VIRTUALIZATION" ]; then
_cgroupv1=""
_cgroupv2=""
case "${CGROUP_MODE:-hybrid}" in
legacy)
_cgroupv1="/sys/fs/cgroup"
;;
hybrid)
_cgroupv1="/sys/fs/cgroup"
_cgroupv2="${_cgroupv1}/unified"
;;
unified)
_cgroupv2="/sys/fs/cgroup"
;;
esac
# cgroup v1
if [ -n "$_cgroupv1" ]; then
mountpoint -q "$_cgroupv1" || mount -o mode=0755 -t tmpfs cgroup "$_cgroupv1"
while read -r _subsys_name _hierarchy _num_cgroups _enabled; do
[ "$_enabled" = "1" ] || continue
_controller="${_cgroupv1}/${_subsys_name}"
mkdir -p "$_controller"
mountpoint -q "$_controller" || mount -t cgroup -o "$_subsys_name" cgroup "$_controller"
done < /proc/cgroups
fi
# cgroup v2
if [ -n "$_cgroupv2" ]; then
mkdir -p "$_cgroupv2"
mountpoint -q "$_cgroupv2" || mount -t cgroup2 -o nsdelegate cgroup2 "$_cgroupv2"
fi
fi

6
runit/scripts/01-static-devnodes.sh Normal file
@@ -0,0 +1,6 @@
# Some kernel modules must be loaded before starting udev(7).
# Load them by looking at the output of `kmod static-nodes`.
for f in $(kmod static-nodes 2>/dev/null|awk '/Module/ {print $2}'); do
modprobe -bq $f 2>/dev/null
done

15
runit/scripts/02-udev.sh Normal file
@@ -0,0 +1,15 @@
[ -n "$VIRTUALIZATION" ] && return 0
if [ -x /sbin/udevd -o -x /bin/udevd ]; then
_udevd=udevd
else
msg_warn "cannot find udevd!"
fi
if [ -n "${_udevd}" ]; then
msg "Starting udev and waiting for devices to settle..."
${_udevd} --daemon
udevadm trigger --action=add --type=subsystems
udevadm trigger --action=add --type=devices
udevadm settle
fi

78
runit/scripts/03-filesystems.sh Normal file
@@ -0,0 +1,78 @@
[ -n "$VIRTUALIZATION" ] && return 0
#msg "Remounting rootfs read-only..."
#mount -o remount,ro / || emergency_shell
if [ -x /sbin/dmraid -o -x /bin/dmraid ]; then
msg "Activating dmraid devices..."
dmraid -i -ay
fi
if [ -x /bin/mdadm ]; then
msg "Activating software RAID arrays..."
mdadm -As
fi
if [ -x /bin/btrfs ]; then
msg "Activating btrfs devices..."
btrfs device scan || emergency_shell
fi
if [ -x /sbin/vgchange -o -x /bin/vgchange ]; then
msg "Activating LVM devices..."
vgchange --sysinit -a ay || emergency_shell
fi
if [ -e /etc/crypttab ]; then
msg "Activating encrypted devices..."
awk -f /etc/runit/crypt.awk /etc/crypttab
if [ -x /sbin/vgchange -o -x /bin/vgchange ]; then
msg "Activating LVM devices for dm-crypt..."
vgchange --sysinit -a ay || emergency_shell
fi
fi
if [ -x /usr/bin/zpool -a -x /usr/bin/zfs ]; then
if [ -e /etc/zfs/zpool.cache ]; then
msg "Importing cached ZFS pools..."
zpool import -N -a -c /etc/zfs/zpool.cache
else
msg "Scanning for and importing ZFS pools..."
zpool import -N -a -o cachefile=none
fi
msg "Mounting ZFS file systems..."
zfs mount -a -l
msg "Sharing ZFS file systems..."
zfs share -a
# NOTE(dh): ZFS has ZVOLs, block devices on top of storage pools.
# In theory, it would be possible to use these as devices in
# dmraid, btrfs, LVM and so on. In practice it's unlikely that
# anybody is doing that, so we aren't supporting it for now.
fi
[ -f /fastboot ] && FASTBOOT=1
[ -f /forcefsck ] && FORCEFSCK="-f"
for arg in $(cat /proc/cmdline); do
case $arg in
fastboot) FASTBOOT=1;;
forcefsck) FORCEFSCK="-f";;
esac
done
if [ -z "$FASTBOOT" ]; then
msg "Checking filesystems:"
fsck -A -T -a -t noopts=_netdev $FORCEFSCK
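# fsck exit status 1 just means errors were corrected; anything higher needs
# manual intervention, so drop to the emergency shell.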
if [ $? -gt 1 ]; then
emergency_shell
fi
fi
msg "Mounting all non-network filesystems..."
mount -a -t "nosysfs,nonfs,nonfs4,nosmbfs,nocifs" -O no_netdev || emergency_shell
# data module
msg "Creating and mounting data directories..."
/usr/local/bin/mount-data || emergency_shell

4
runit/scripts/04-swap.sh Normal file
@@ -0,0 +1,4 @@
[ -n "$VIRTUALIZATION" ] && return 0
msg "Initializing swap..."
swapon -a || emergency_shell

8
runit/scripts/05-misc.sh Normal file
@@ -0,0 +1,8 @@
install -m0664 -o root -g utmp /dev/null /run/utmp
#halt -B # for wtmp
msg "Setting up loopback interface..."
ip link set up dev lo
msg "Setting hostname..."
hostname -F /etc/hostname

52
runit/scripts/functions Normal file
@@ -0,0 +1,52 @@
msg() {
# bold
printf "\\033[1m=> $@\\033[m\\n"
}
msg_ok() {
# bold/green
printf "\\033[1m\\033[32m OK\\033[m\\n"
}
msg_error() {
# bold/red
printf "\\033[1m\\033[31mERROR: $@\\033[m\\n"
}
msg_warn() {
# bold/yellow
printf "\\033[1m\\033[33mWARNING: $@\\033[m\\n"
}
emergency_shell() {
echo
echo "Cannot continue due to errors above, starting emergency shell."
echo "When ready type exit to continue booting."
/bin/sh -l
}
detect_virt() {
# Detect LXC (and other) containers
[ -z "${container+x}" ] || export VIRTUALIZATION=1
}
deactivate_vgs() {
_group=${1:-All}
if [ -x /sbin/vgchange -o -x /bin/vgchange ]; then
vgs=$(vgs|wc -l)
if [ $vgs -gt 0 ]; then
msg "Deactivating $_group LVM Volume Groups..."
vgchange -an
fi
fi
}
deactivate_crypt() {
if [ -x /sbin/dmsetup -o -x /bin/dmsetup ]; then
msg "Deactivating Crypt Volumes"
for v in $(dmsetup ls --target crypt --exec "dmsetup info -c --noheadings -o open,name"); do
[ ${v%%:*} = "0" ] && cryptsetup close ${v##*:}
done
deactivate_vgs "Crypt"
fi
}