uboot: (firmwareOdroidC2/C4) don't invoke patch tool, use patches = [] instead
https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948 this can do it nicely. Signed-off-by: Anton Arapov <anton@deadbeef.mx>
This commit is contained in:
commit
56de2bcd43
30691 changed files with 3076956 additions and 0 deletions
107
nixos/tests/kubernetes/base.nix
Normal file
107
nixos/tests/kubernetes/base.nix
Normal file
|
|
@ -0,0 +1,107 @@
|
|||
# Base library for the NixOS Kubernetes VM tests.  Builds complete test
# fixtures (nodes + testScript) for single- and multi-node clusters on top
# of the python test driver.
{ system ? builtins.currentSystem,
  config ? {},
  pkgs ? import ../../.. { inherit system config; }
}:

with import ../../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;

let
  # Build a Kubernetes cluster test.
  #
  #   name               - test name (embedded in the derivation name)
  #   domain             - cluster DNS domain
  #   test               - python test-driver script, appended after start_all()
  #   machines           - attrset of { roles, ip } (plus optional
  #                        extraConfiguration) per node
  #   extraConfiguration - optional NixOS module merged into every node
  mkKubernetesBaseTest =
    { name, domain ? "my.zyx", test, machines
    , extraConfiguration ? null }:
    let
      # The first machine (in attrNames order) whose roles contain "master"
      # acts as the control plane.
      masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
      master = machines.${masterName};
      # Static /etc/hosts entries so the nodes can resolve etcd, the API
      # server and each other without external DNS.
      extraHosts = ''
        ${master.ip} etcd.${domain}
        ${master.ip} api.${domain}
        ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip} ${machineName}.${domain}") (attrNames machines)}
      '';
      # kubectl wrapper pre-pointed at the generated cluster-admin kubeconfig.
      wrapKubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
        mkdir -p $out/bin
        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
      '';
    in makeTest {
      inherit name;

      nodes = mapAttrs (machineName: machine:
        { config, pkgs, lib, nodes, ... }:
          mkMerge [
            {
              # Wipe generated PKI secrets and the shared exchange dir on
              # (re)boot so every run starts from a clean state.
              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
              virtualisation.memorySize = mkDefault 1536;
              virtualisation.diskSize = mkDefault 4096;
              networking = {
                inherit domain extraHosts;
                primaryIPAddress = mkForce machine.ip;

                firewall = {
                  allowedTCPPorts = [
                    10250 # kubelet
                  ];
                  trustedInterfaces = ["mynet"];

                  # Accept all traffic coming from fellow cluster nodes.
                  extraCommands = concatMapStrings (node: ''
                    iptables -A INPUT -s ${node.config.networking.primaryIPAddress} -j ACCEPT
                  '') (attrValues nodes);
                };
              };
              programs.bash.enableCompletion = true;
              environment.systemPackages = [ wrapKubectl ];
              services.flannel.iface = "eth1";
              services.kubernetes = {
                proxy.hostname = "${masterName}.${domain}";

                easyCerts = true;
                inherit (machine) roles;
                apiserver = {
                  securePort = 443;
                  advertiseAddress = master.ip;
                };
                masterAddress = "${masterName}.${config.networking.domain}";
              };
            }
            # Master nodes additionally expose the apiserver port.
            (optionalAttrs (any (role: role == "master") machine.roles) {
              networking.firewall.allowedTCPPorts = [
                443 # kubernetes apiserver
              ];
            })
            # Per-machine and per-test extra configuration hooks.
            (optionalAttrs (machine ? extraConfiguration) (machine.extraConfiguration { inherit config pkgs lib nodes; }))
            (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
          ]
      ) machines;

      testScript = ''
        start_all()
      '' + test;
    };

  # Canned two-node cluster (dedicated master + worker) on 192.168.1.0/24.
  # attrs may override machines; the name is always forced last.
  mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
    machines = {
      machine1 = {
        roles = ["master"];
        ip = "192.168.1.1";
      };
      machine2 = {
        roles = ["node"];
        ip = "192.168.1.2";
      };
    };
  } // attrs // {
    name = "kubernetes-${attrs.name}-multinode";
  });

  # Canned single machine carrying both the master and node roles.
  mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
    machines = {
      machine1 = {
        roles = ["master" "node"];
        ip = "192.168.1.1";
      };
    };
  } // attrs // {
    name = "kubernetes-${attrs.name}-singlenode";
  });
in {
  inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
}
|
||||
15
nixos/tests/kubernetes/default.nix
Normal file
15
nixos/tests/kubernetes/default.nix
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
# Entry point aggregating the Kubernetes VM test suites.
{ system ? builtins.currentSystem
, pkgs ? import ../../.. { inherit system; }
}:
let
  dnsTests  = import ./dns.nix  { inherit system pkgs; };
  rbacTests = import ./rbac.nix { inherit system pkgs; };
  # TODO kubernetes.e2e should eventually replace kubernetes.rbac when it works
  # e2e = import ./e2e.nix { inherit system pkgs; };
in
{
  dns-single-node  = dnsTests.singlenode.test;
  dns-multi-node   = dnsTests.multinode.test;
  rbac-single-node = rbacTests.singlenode.test;
  rbac-multi-node  = rbacTests.multinode.test;
}
|
||||
151
nixos/tests/kubernetes/dns.nix
Normal file
151
nixos/tests/kubernetes/dns.nix
Normal file
|
|
@ -0,0 +1,151 @@
|
|||
# Tests cluster DNS (coredns): a redis pod/service plus a probe pod that
# resolves the service name both from the host (via dnsmasq) and from
# inside a container.
{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
with import ./base.nix { inherit system; };
let
  domain = "my.zyx";

  # Pod manifest running the locally-built "redis" image.
  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "redis";
    metadata.labels.name = "redis";
    spec.containers = [{
      name = "redis";
      image = "redis";
      args = ["--bind" "0.0.0.0"];
      imagePullPolicy = "Never"; # image is side-loaded via ctr, never pulled
      ports = [{
        name = "redis-server";
        containerPort = 6379;
      }];
    }];
  });

  # Service in front of the redis pod; its DNS name is what the test resolves.
  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
    kind = "Service";
    apiVersion = "v1";
    metadata.name = "redis";
    spec = {
      ports = [{port = 6379; targetPort = 6379;}];
      selector = {name = "redis";};
    };
  });

  # Minimal container image bundling redis (and `host` for debugging).
  redisImage = pkgs.dockerTools.buildImage {
    name = "redis";
    tag = "latest";
    contents = [ pkgs.redis pkgs.bind.host ];
    config.Entrypoint = ["/bin/redis-server"];
  };

  # Long-running pod used to exec DNS lookups from inside the cluster.
  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "probe";
    metadata.labels.name = "probe";
    spec.containers = [{
      name = "probe";
      image = "probe";
      args = [ "-f" ]; # tail -f keeps the container alive
      tty = true;
      imagePullPolicy = "Never";
    }];
  });

  # Image for the probe pod: busybox tail as entrypoint, bind's `host` tool.
  probeImage = pkgs.dockerTools.buildImage {
    name = "probe";
    tag = "latest";
    contents = [ pkgs.bind.host pkgs.busybox ];
    config.Entrypoint = ["/bin/tail"];
  };

  # Forward cluster.local queries on the host to the cluster DNS addon, so
  # `host` works outside the pods too.
  extraConfiguration = { config, pkgs, lib, ... }: {
    environment.systemPackages = [ pkgs.bind.host ];
    services.dnsmasq.enable = true;
    services.dnsmasq.servers = [
      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
    ];
  };

  base = {
    name = "dns";
    inherit domain extraConfiguration;
  };

  singleNodeTest = {
    test = ''
      # prepare machine1 for test
      machine1.wait_until_succeeds("kubectl get node machine1.${domain} | grep -w Ready")
      machine1.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${redisPod}"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${redisService}"
      )
      machine1.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${probePod}"
      )

      # check if pods are running
      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")

      # check dns on host (dnsmasq)
      machine1.succeed("host redis.default.svc.cluster.local")

      # check dns inside the container
      machine1.succeed("kubectl exec probe -- /bin/host redis.default.svc.cluster.local")
    '';
  };

  multiNodeTest = {
    test = ''
      # Node token exchange
      machine1.wait_until_succeeds(
          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
      )
      machine2.wait_until_succeeds(
          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
      )

      # prepare machines for test
      machine1.wait_until_succeeds("kubectl get node machine2.${domain} | grep -w Ready")
      machine2.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${redisPod}"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${redisService}"
      )
      machine2.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${probePod}"
      )

      # check if pods are running
      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")

      # check dns on hosts (dnsmasq)
      machine1.succeed("host redis.default.svc.cluster.local")
      machine2.succeed("host redis.default.svc.cluster.local")

      # check dns inside the container
      machine1.succeed("kubectl exec probe -- /bin/host redis.default.svc.cluster.local")
    '';
  };
in {
  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}
|
||||
40
nixos/tests/kubernetes/e2e.nix
Normal file
40
nixos/tests/kubernetes/e2e.nix
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
# Runs the upstream Kubernetes conformance (e2e) suite against the test
# cluster.  Currently disabled in default.nix (see the TODO there).
{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
with import ./base.nix { inherit system; };
let
  domain = "my.zyx";
  certs = import ./certs.nix { externalDomain = domain; kubelets = ["machine1" "machine2"]; };
  # Admin kubeconfig handed to e2e.test, built from the generated certs.
  kubeconfig = pkgs.writeText "kubeconfig.json" (builtins.toJSON {
    apiVersion = "v1";
    kind = "Config";
    clusters = [{
      name = "local";
      cluster.certificate-authority = "${certs.master}/ca.pem";
      cluster.server = "https://api.${domain}";
    }];
    users = [{
      name = "kubelet";
      user = {
        client-certificate = "${certs.admin}/admin.pem";
        client-key = "${certs.admin}/admin-key.pem";
      };
    }];
    contexts = [{
      context = {
        cluster = "local";
        user = "kubelet";
      };
      current-context = "kubelet-context";
    }];
  });

  base = {
    name = "e2e";
    # NOTE: `certs` is intentionally NOT inherited here.  mkKubernetesBaseTest
    # has a closed argument pattern ({ name, domain ? ..., test, machines,
    # extraConfiguration ? null }), so forwarding an extra `certs` attribute
    # would be an evaluation error; kubeconfig above already captures it.
    inherit domain;
    test = ''
      # Fixed: was Perl driver syntax ($machine1->succeed(...)), but base.nix
      # uses testing-python.nix, whose scripts are python.
      machine1.succeed("e2e.test -kubeconfig ${kubeconfig} -provider local -ginkgo.focus '\\[Conformance\\]' -ginkgo.skip '\\[Flaky\\]|\\[Serial\\]'")
    '';
  };
in {
  singlenode = mkKubernetesSingleNodeTest base;
  multinode = mkKubernetesMultiNodeTest base;
}
|
||||
164
nixos/tests/kubernetes/rbac.nix
Normal file
164
nixos/tests/kubernetes/rbac.nix
Normal file
|
|
@ -0,0 +1,164 @@
|
|||
# Tests RBAC enforcement: a read-only ServiceAccount may list pods but must
# be denied create/delete, exercised via kubectl running inside a pod.
{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
with import ./base.nix { inherit system; };
let

  # ServiceAccount with (only) the read-pods binding below.
  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
    kind = "ServiceAccount";
    apiVersion = "v1";
    metadata = {
      name = "read-only";
      namespace = "default";
    };
  });

  # Binds the pod-reader role to the read-only ServiceAccount.
  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      name = "read-pods";
      namespace = "default";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "pod-reader";
    };
    subjects = [{
      kind = "ServiceAccount";
      name = "read-only";
      namespace = "default";
    }];
  });

  # Role granting read-only access to pods in the default namespace.
  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "Role";
    metadata = {
      name = "pod-reader";
      namespace = "default";
    };
    rules = [{
      apiGroups = [""];
      resources = ["pods"];
      verbs = ["get" "list" "watch"];
    }];
  });

  # Long-running pod whose ServiceAccount is the restricted one; the test
  # execs kubectl inside it to probe what that account is allowed to do.
  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "kubectl";
    metadata.namespace = "default";
    metadata.labels.name = "kubectl";
    spec.serviceAccountName = "read-only";
    spec.containers = [{
      name = "kubectl";
      image = "kubectl:latest";
      command = ["/bin/tail" "-f"]; # keep the container alive
      imagePullPolicy = "Never";
      tty = true;
    }];
  });

  # Second pod manifest baked INTO the image (writeTextDir), so the in-pod
  # kubectl can attempt `create -f /kubectl-pod-2.json` — which must fail.
  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "kubectl-2";
    metadata.namespace = "default";
    metadata.labels.name = "kubectl-2";
    spec.serviceAccountName = "read-only";
    spec.containers = [{
      name = "kubectl-2";
      image = "kubectl:latest";
      command = ["/bin/tail" "-f"];
      imagePullPolicy = "Never";
      tty = true;
    }];
  });

  copyKubectl = pkgs.runCommand "copy-kubectl" { } ''
    mkdir -p $out/bin
    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
  '';

  # Image containing kubectl, busybox and the second pod manifest.
  kubectlImage = pkgs.dockerTools.buildImage {
    name = "kubectl";
    tag = "latest";
    contents = [ copyKubectl pkgs.busybox kubectlPod2 ];
    config.Entrypoint = ["/bin/sh"];
  };

  base = {
    name = "rbac";
  };

  singlenode = base // {
    test = ''
      machine1.wait_until_succeeds("kubectl get node machine1.my.zyx | grep -w Ready")

      machine1.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )

      machine1.wait_until_succeeds(
          "kubectl apply -f ${roServiceAccount}"
      )
      machine1.wait_until_succeeds(
          "kubectl apply -f ${roRole}"
      )
      machine1.wait_until_succeeds(
          "kubectl apply -f ${roRoleBinding}"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${kubectlPod}"
      )

      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")

      # Reads are allowed; create and delete must be rejected by RBAC.
      machine1.wait_until_succeeds("kubectl exec kubectl -- kubectl get pods")
      machine1.fail("kubectl exec kubectl -- kubectl create -f /kubectl-pod-2.json")
      machine1.fail("kubectl exec kubectl -- kubectl delete pods -l name=kubectl")
    '';
  };

  multinode = base // {
    test = ''
      # Node token exchange
      machine1.wait_until_succeeds(
          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
      )
      machine2.wait_until_succeeds(
          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
      )

      machine1.wait_until_succeeds("kubectl get node machine2.my.zyx | grep -w Ready")

      machine2.wait_until_succeeds(
          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
      )

      machine1.wait_until_succeeds(
          "kubectl apply -f ${roServiceAccount}"
      )
      machine1.wait_until_succeeds(
          "kubectl apply -f ${roRole}"
      )
      machine1.wait_until_succeeds(
          "kubectl apply -f ${roRoleBinding}"
      )
      machine1.wait_until_succeeds(
          "kubectl create -f ${kubectlPod}"
      )

      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")

      # Reads are allowed; create and delete must be rejected by RBAC.
      machine1.wait_until_succeeds("kubectl exec kubectl -- kubectl get pods")
      machine1.fail("kubectl exec kubectl -- kubectl create -f /kubectl-pod-2.json")
      machine1.fail("kubectl exec kubectl -- kubectl delete pods -l name=kubectl")
    '';
  };

in {
  singlenode = mkKubernetesSingleNodeTest singlenode;
  multinode = mkKubernetesMultiNodeTest multinode;
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue