Jeffrey C. Ollie 2023-03-13 12:08:42 -05:00
commit 693a69f2bf
Signed by: jeff
GPG key ID: 6F86035A6D97044E
3 changed files with 635 additions and 0 deletions

containerd-config.nix (new file, +22 lines)

@@ -0,0 +1,22 @@
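# Standalone containerd configuration expressed as a Nix attrset: runc as the
# runtime with systemd cgroups enabled, and CNI plugins/config expected under
# /opt/cni/bin and /etc/cni/net.d. The flake's NixOS module applies the same
# plugin settings through virtualisation.containerd.settings.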
{ ... }: {
plugins = {
"io.containerd.runtime.v1.linux" = {
runtime = "runc";
};
"io.containerd.grpc.v1.cri" = {
cni = {
bin_dir = "/opt/cni/bin";
conf_dir = "/etc/cni/net.d";
};
containerd = {
runtimes = {
runc = {
options = {
SystemdCgroup = true;
};
};
};
};
};
};
}

flake.lock (generated, new file, +42 lines)

@@ -0,0 +1,42 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1668417584,
"narHash": "sha256-yeuEyxKPwsm5fIHN49L/syn9g5coxnPp3GsVquhrv5A=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "013fcdd106823416918004bb684c3c186d3c460f",
"type": "github"
},
"original": {
"id": "nixpkgs",
"ref": "nixos-unstable",
"type": "indirect"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file, +571 lines)

@@ -0,0 +1,571 @@
{
description = "openlens";
inputs = {
nixpkgs = {
url = "nixpkgs/nixos-unstable";
};
flake-utils = {
url = "github:numtide/flake-utils";
};
};
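# Per-system outputs expose the pinned Kubernetes tooling as packages; the
# attrset merged in after eachDefaultSystem adds a NixOS module that wires
# those packages into a WireGuard-meshed, kubeadm-managed cluster.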
outputs = { self, nixpkgs, flake-utils, ... }@inputs:
flake-utils.lib.eachDefaultSystem
(system:
let
pkgs = import nixpkgs {
inherit system;
};
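# Translate the Nix system string into the architecture name used in
# upstream Kubernetes release URLs (Go-style "amd64"/"arm64").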
arch = {
"x86_64-linux" = "amd64";
"aarch64-linux" = "arm64";
}.${system};
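# Pinned hashes per binary, architecture and version. Entries left as empty
# strings are placeholders that still need a real hash before that version
# can be built.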
hashes = {
kubectl = {
"amd64" = {
"1.24.8" = "sha256-+TwYdR7HFbTUQ35+zhj+kZSMcb4fJKsCot3hUPVEmFU=";
"1.23.5" = "sha256-cV2gXFaqT43wnLH52WoqosM6EjL2/RleP/zm6YpQqHk=";
"1.23.14" = "";
};
};
kubeadm = {
"amd64" = {
"1.24.8" = "sha256-n+pCtPtestpjjSBxDrt5Hd4iHmR3eT095wE0rAWMTMc=";
"1.23.5" = "sha256-juve0YfuhMlwAwdOqjR+NBMf7zrN8+WJqbAgD5Rodmc=";
"1.23.14" = "";
};
};
kubelet = {
"amd64" = {
"1.24.8" = "sha256-LaC5OFfPNSv/XR60LjTTmKWXG2OlPYaHtFF5p4VA1tY=";
"1.23.5" = "sha256-JTudsimbCbkeTAl4HOHS22utIJnPFrohAkUVn0jQ1eQ=";
"1.23.14" = "";
};
};
crictl = {
"amd64" = {
"1.24.1" = "sha256-QAbTrxELFEhlvg0wgUXh+Q97QN+gsgjYLT0dJfO3v5c=";
"1.23.0" = "sha256-t1T4PICs3HX5OroZH/Jp2mvkXQ/C0/QHlwTn0UJPHKg=";
};
};
cni = {
"amd64" = {
"0.8.7" = "sha256-l3gkky1WZ8ejeqajy7pAEApoc+e9l+g+i+g34+ev0Kg=";
};
};
};
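# Builder for the upstream kubeadm/kubectl/kubelet release binaries: fetch
# the requested version from the Kubernetes release bucket, fix up the ELF
# interpreter with autoPatchelfHook, install the binary, and generate shell
# completions from the binary's own `completion` subcommand.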
kubePackage = pname: version: pkgs.stdenvNoCC.mkDerivation {
pname = pname;
version = version;
src = pkgs.fetchurl {
url = "https://storage.googleapis.com/kubernetes-release/release/v${version}/bin/linux/${arch}/${pname}";
sha256 = hashes.${pname}.${arch}.${version};
};
nativeBuildInputs = [
pkgs.autoPatchelfHook
pkgs.installShellFiles
];
dontUnpack = true;
dontPatch = true;
dontConfigure = true;
dontBuild = true;
installPhase = ''
$preInstall
install -d $out/bin
install $src -m 0555 $out/bin/${pname}
$postInstall
'';
postFixup = ''
installShellCompletion --cmd ${pname} \
--bash <($out/bin/${pname} completion bash) \
--fish <($out/bin/${pname} completion fish) \
--zsh <($out/bin/${pname} completion zsh)
'';
};
in
{
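# kubeadm/kubectl/kubelet are exposed both as functions taking a version
# string (the NixOS module below calls these with the configured versions)
# and as a few concrete, pre-pinned derivations.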
packages = {
kubeadm_1_23_5 = kubePackage "kubeadm" "1.23.5";
kubeadm = (version: kubePackage "kubeadm" version);
kubectl = (version: kubePackage "kubectl" version);
kubectl_1_23_5 = kubePackage "kubectl" "1.23.5";
kubelet = (version: kubePackage "kubelet" version);
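# Flexvolume driver script for mounting CIFS shares as Kubernetes volumes,
# wrapped so the helper tools it needs (cifs-utils, jq, keyutils,
# util-linux, ...) are on PATH.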
fstab-cifs =
let
pname = "fstab-cifs";
buildInputs = [
pkgs.bash
pkgs.cifs-utils
pkgs.coreutils
pkgs.jq
pkgs.keyutils
pkgs.util-linux
pkgs.which
];
in
pkgs.stdenv.mkDerivation {
inherit pname;
name = pname;
src = pkgs.fetchFromGitHub {
owner = "fstab";
repo = "cifs";
rev = "3b640936ef51614d3c1ad68cba50c4db5da3d61b";
sha256 = "sha256-C5ze3CWyDxdWJ9cYWUmjKVPCrrdUYXP2JvlnRcW/lgg=";
};
nativeBuildInputs = [
pkgs.makeWrapper
];
buildInputs = buildInputs;
dontUnpack = true;
dontPatch = true;
dontConfigure = true;
dontBuild = true;
installPhase = ''
install -d $out/bin
install $src/cifs -m 0555 $out/bin/cifs
wrapProgram $out/bin/cifs --prefix PATH : ${pkgs.lib.makeBinPath buildInputs}
'';
meta = {
homepage = "https://github.com/fstab/cifs";
description = "Driver for CIFS (SMB, Samba, Windows Share) network filesystems as Kubernetes volumes.";
longDescription = ''
Docker containers running in Kubernetes have an
ephemeral file system: Once a container is
terminated, all files are gone. In order to store
persistent data in Kubernetes, you need to mount a
Persistent Volume into your container. Kubernetes
has built-in support for network filesystems found
in the most common cloud providers, like Amazon's
EBS, Microsoft's Azure disk, etc. However, some
cloud hosting services, like the Hetzner cloud,
provide network storage using the CIFS (SMB, Samba,
Windows Share) protocol, which is not natively
supported in Kubernetes.
Fortunately, Kubernetes provides Flexvolume, which
is a plugin mechanism enabling users to write their
own drivers. There are a few flexvolume drivers for
CIFS out there, but for different reasons none of
them seemed to work for me. So I wrote my own, which
can be found on github.com/fstab/cifs.
'';
license = pkgs.lib.licenses.mit;
};
};
crictl =
let
pname = "crictl";
version = "1.23.0";
in
pkgs.stdenvNoCC.mkDerivation {
inherit pname version;
src = pkgs.fetchurl {
url = "https://github.com/kubernetes-sigs/cri-tools/releases/download/v${version}/${pname}-v${version}-linux-${arch}.tar.gz";
sha256 = hashes.${pname}.${arch}.${version};
};
nativeBuildInputs = [
pkgs.autoPatchelfHook
];
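# The release tarball unpacks without a top-level directory, so point
# sourceRoot at the current directory instead of letting stdenv guess one.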
setSourceRoot = "sourceRoot=`pwd`";
dontUnpack = false;
dontPatch = true;
dontConfigure = true;
dontBuild = true;
installPhase = ''
install -d $out/bin
install ${pname} -m 0555 $out/bin/${pname}
'';
};
cni =
let
pname = "cni";
version = "0.8.7";
in
pkgs.stdenv.mkDerivation {
pname = pname;
version = version;
src = pkgs.fetchurl {
url = "https://github.com/containernetworking/plugins/releases/download/v${version}/cni-plugins-linux-${arch}-v${version}.tgz";
sha256 = hashes.${pname}.${arch}.${version};
};
nativeBuildInputs = [
pkgs.autoPatchelfHook
];
setSourceRoot = "sourceRoot=`pwd`";
dontPatch = true;
dontConfigure = true;
dontBuild = true;
installPhase = ''
install -d $out/bin
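# copy every unpacked plugin, skipping the env-vars file that stdenv
# writes into the build directory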
install -m 0555 `ls | grep -v env-vars` $out/bin
'';
};
};
}
) // {
nixosModules.kubernetes = { config, lib, pkgs, name, ... }:
let
cfg = config.jk8s;
in
{
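# Cluster description under `config.jk8s`: pinned tool versions,
# cluster-wide service/pod CIDRs, and one entry per host with its role,
# public endpoint, WireGuard keys/address and per-host pod subnet. The
# `name` module argument is the node's attribute name as supplied by the
# deployment tool (colmena, for example, passes it this way) and is used
# below to look up this host's entry in `hosts`.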
options = with lib; {
jk8s = mkOption {
type = with types; submodule {
options = {
enable = mkEnableOption "Kubernetes";
package_versions = mkOption {
type = submodule {
options = {
kubeadm = mkOption {
type = str;
};
kubectl = mkOption {
type = str;
};
kubelet = mkOption {
type = str;
};
};
};
};
service_subnet = mkOption {
type = str;
};
pod_subnet = mkOption {
type = str;
};
hosts = mkOption {
type = attrsOf
(
submodule {
options = {
role = mkOption {
type = types.enum [ "etcd" "master+etcd" "master" "worker" ];
};
endpoint = mkOption {
type = submodule {
options = {
address = mkOption {
type = str;
};
port = mkOption {
type = addCheck int (port: port >= 1025 && port <= 65535);
};
};
};
};
wireguard_subnet = mkOption {
type = submodule {
options = {
address = mkOption {
type = str;
};
mask = mkOption {
type = addCheck int (mask: mask >= 0 && mask <= 32);
};
};
};
};
private_key = mkOption {
type = str;
};
public_key = mkOption {
type = str;
};
pod_subnet = mkOption {
type = submodule {
options = {
address = mkOption {
type = str;
};
mask = mkOption {
type = addCheck int (mask: mask >= 0 && mask <= 32);
};
gateway = mkOption {
type = str;
};
};
};
};
};
}
);
};
};
};
};
};
config = with lib; mkIf cfg.enable (
let
kubeadm = (self.packages."x86_64-linux".kubeadm cfg.package_versions.kubeadm);
kubectl = (self.packages."x86_64-linux".kubectl cfg.package_versions.kubectl);
kubelet = (self.packages."x86_64-linux".kubelet cfg.package_versions.kubelet);
in
{
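# `deployment.keys` is not a NixOS option; it comes from the deployment
# tool (colmena provides these options). The WireGuard private key is
# uploaded to /etc/wireguard before activation instead of going into the
# Nix store.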
deployment.keys."private_key" = {
text = cfg.hosts.${name}.private_key;
destDir = "/etc/wireguard";
user = "root";
group = "systemd-network";
permissions = "0440";
uploadAt = "pre-activation";
};
systemd.tmpfiles.rules = [
# this is needed so that the cephfs csi module can mount cephfs volumes
"L+ /lib/modules - - - - ${pkgs.linux_latest}/lib/modules"
# link cni plugins to where kubelet expects them
"L+ /opt/cni/bin - - - - ${self.packages."x86_64-linux".cni}/bin"
];
boot.kernelModules = [
"br_netfilter"
"overlay"
"ceph"
];
boot.kernel.sysctl = {
"fs.inotify.max_user_instances" = 1024;
"fs.inotify.max_user_watches" = 4064932;
"net.bridge.bridge-nf-call-arptables" = 1;
"net.bridge.bridge-nf-call-ip6tables" = 1;
"net.bridge.bridge-nf-call-iptables" = 1;
"net.ipv4.ip_forward" = 1;
"user.max_inotify_instances" = 1024;
"user.max_inotify_watches" = 4064932;
};
networking.firewall.enable = false;
services.openssh.openFirewall = false;
systemd.enableUnifiedCgroupHierarchy = true;
environment.systemPackages =
let
endpoints = map
(
n: "https://${cfg.hosts.${n}.endpoint.address}:2379"
)
(
filter (n: cfg.hosts.${n}.role == "etcd" || cfg.hosts.${n}.role == "master+etcd") (attrNames cfg.hosts)
);
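# Convenience wrapper that points etcdctl at every etcd member,
# authenticating with the kubeadm-generated healthcheck client certificate.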
etcdctl = pkgs.writeShellScriptBin "etcdctl" ''
export ETCDCTL_API=3
export ETCDCTL_CERT=/etc/kubernetes/pki/etcd/healthcheck-client.crt
export ETCDCTL_KEY=/etc/kubernetes/pki/etcd/healthcheck-client.key
export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt
export ETCDCTL_ENDPOINTS=${concatStringsSep "," endpoints}
exec ${pkgs.etcd}/bin/etcdctl "$@"
'';
in
[
self.packages."x86_64-linux".cni
self.packages."x86_64-linux".crictl
kubeadm
kubectl
kubelet
etcdctl
pkgs.conntrack-tools
pkgs.ethtool
pkgs.file
pkgs.iptables
pkgs.socat
pkgs.wireguard-tools
pkgs.wireshark-cli
];
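# Same containerd plugin settings as containerd-config.nix, except the CNI
# bin_dir points at this flake's cni package and the runc v2 shim is
# selected explicitly.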
virtualisation.containerd = {
enable = true;
settings = {
plugins = {
"io.containerd.runtime.v1.linux" = {
runtime = "runc";
};
"io.containerd.grpc.v1.cri" = {
cni = {
bin_dir = "${self.packages."x86_64-linux".cni}/bin";
conf_dir = "/etc/cni/net.d";
};
containerd = {
runtimes = {
runc = {
runtime_type = "io.containerd.runc.v2";
options = {
SystemdCgroup = true;
};
};
};
};
};
};
};
};
environment.etc = {
"crictl.yaml" = {
text = ''
runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: ""
timeout: 0
debug: false
pull-image-on-create: false
disable-pull-on-run: false
'';
};
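# Bridge CNI config: host-local IPAM allocates pod addresses from this
# host's pod subnet, with the cni0 bridge (defined below) acting as the
# default gateway.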
"cni/net.d/10-cni0.conf" = {
text = ''
{
"cniVersion": "0.4.0",
"name": "cni0",
"type": "bridge",
"bridge": "cni0",
"isDefaultGateway": true,
"forceAddress": false,
"ipMasq": false,
"ipam": {
"type": "host-local",
"subnet": "${cfg.hosts.${name}.pod_subnet.address}/${toString cfg.hosts.${name}.pod_subnet.mask}",
"gateway": "${cfg.hosts.${name}.pod_subnet.gateway}"
}
}
'';
};
};
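# Create the cni0 bridge with systemd-networkd and assign it the pod-subnet
# gateway address; the bridge CNI plugin above attaches pod interfaces to it.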
systemd.network.netdevs."05-cni0" = {
netdevConfig = {
Name = "cni0";
Kind = "bridge";
};
};
systemd.network.networks."05-cni0" = {
name = "cni0";
linkConfig = {
RequiredForOnline = "no";
};
networkConfig = {
DHCP = "no";
LinkLocalAddressing = "no";
LLMNR = "no";
LLDP = "no";
EmitLLDP = "no";
IPForward = "yes";
ConfigureWithoutCarrier = "yes";
};
addresses = [
{
addressConfig = {
Address = "${cfg.hosts.${name}.pod_subnet.gateway}/${toString cfg.hosts.${name}.pod_subnet.mask}";
};
}
];
};
systemd.network.netdevs."10-kube" = {
netdevConfig = {
Name = "kube";
Kind = "wireguard";
};
wireguardConfig = {
ListenPort = 24200;
PrivateKeyFile = "/etc/wireguard/private_key";
};
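# One peer per remote host. AllowedIPs covers the cluster service and pod
# CIDRs plus the peer's own WireGuard address and pod subnet, so
# node-to-node cluster traffic is carried over the tunnel.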
wireguardPeers = map
(
n: {
wireguardPeerConfig = {
PublicKey = cfg.hosts.${n}.public_key;
Endpoint = "${cfg.hosts.${n}.endpoint.address}:${toString cfg.hosts.${n}.endpoint.port}";
AllowedIPs = [
cfg.service_subnet
cfg.pod_subnet
cfg.hosts.${n}.wireguard_subnet.address
"${cfg.hosts.${n}.pod_subnet.address}/${toString cfg.hosts.${n}.pod_subnet.mask}"
];
PersistentKeepalive = 25;
};
}
)
(
filter (n: n != name) (attrNames cfg.hosts)
);
};
systemd.network.networks."10-kube" = {
name = "kube";
linkConfig = {
RequiredForOnline = "no";
};
networkConfig = {
DHCP = "no";
IPForward = "yes";
};
addresses = map
(
n: {
addressConfig = {
Address = "${cfg.hosts.${n}.wireguard_subnet.address}/${toString cfg.hosts.${n}.wireguard_subnet.mask}";
};
}
)
(
filter (n: n == name) (attrNames cfg.hosts)
);
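# Route each remote host's pod subnet via that host's WireGuard address.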
routes = map
(
n: {
routeConfig = {
Destination = "${cfg.hosts.${n}.pod_subnet.address}/${toString cfg.hosts.${n}.pod_subnet.mask}";
Gateway = cfg.hosts.${n}.wireguard_subnet.address;
};
}
)
(
filter (n: n != name) (attrNames cfg.hosts)
);
};
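# kubelet unit modelled on the drop-in that kubeadm ships: kubeadm fills in
# /var/lib/kubelet/kubeadm-flags.env and the kubeconfigs, and the service
# keeps restarting every 10s until `kubeadm init`/`join` has provided them.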
systemd.services."kubelet" = {
enable = true;
description = "kubelet";
path = [
pkgs.ceph-client
pkgs.ethtool
pkgs.iproute2
pkgs.iptables
pkgs.kmod
pkgs.socat
pkgs.thin-provisioning-tools
pkgs.util-linux
];
unitConfig = {
StartLimitIntervalSec = 0;
};
serviceConfig = {
Slice = "kubernetes.slice";
CPUAccounting = true;
MemoryAccounting = true;
Type = "simple";
Environment = [
"KUBELET_KUBECONFIG_ARGS=\"--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\""
"KUBELET_CONFIG_ARGS=\"--config=/var/lib/kubelet/config.yaml\""
];
EnvironmentFile = [
"-/var/lib/kubelet/kubeadm-flags.env"
"-/etc/sysconfig/kubelet"
];
ExecStart = "${kubelet}/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS";
Restart = "always";
RestartSec = "10s";
};
wantedBy = [ "multi-user.target" ];
};
}
);
};
};
}