diff --git a/.gitignore b/.gitignore index efa6632..bb0a578 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ -bin/* \ No newline at end of file +bin/* +.vscode +coverage.out \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..6227ace --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @metal-stack/os-installer-maintainers diff --git a/pkg/network/Dockerfile.validate b/Dockerfile.validate similarity index 100% rename from pkg/network/Dockerfile.validate rename to Dockerfile.validate diff --git a/Makefile b/Makefile index c678aa4..b06cdde 100644 --- a/Makefile +++ b/Makefile @@ -26,11 +26,9 @@ binary: .PHONY: test test: - GO_ENV=testing go test -race -cover ./... + GO_ENV=testing go test ./... -race -coverpkg=./... -coverprofile=coverage.out -covermode=atomic && go tool cover -func=coverage.out .PHONY: validate validate: - cd pkg/network ./validate.sh - cd - diff --git a/README.md b/README.md index fc5d253..7239b48 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,20 @@ -# OS Installer +# os-installer -OS installer is used to configure the already untarred metal-image on the machine based on the configuration which must be located at `/etc/metal/install.yaml`. +The OS installer is used to configure a machine according to the given allocation specification, like configuring: + +- Network interfaces +- FRR configuration +- Metal user and authorized keys +- Bootloader configuration +- Ignition and cloud-init userdata +- ... + +It currently supports the officially published operating system images from the [metal-images repository](https://github.com/metal-stack/metal-images). + +The installer is executed by the [metal-hammer](https://github.com/metal-stack/metal-hammer) in a chroot, pointing to the root of the uncompressed operating system image. 
+ +The input configuration for the installer are: + +- The `MachineDetails` as defined in [api/v1/api.go](./api/v1/api.go) +- The `MachineAllocation` as defined in the [API repository](https://github.com/metal-stack/api/) +- An optional installer `Config` as defined in [api/v1/api.go](./api/v1/api.go) (for building own images) diff --git a/api/v1/api.go b/api/v1/api.go index fb737a3..dfdd701 100644 --- a/api/v1/api.go +++ b/api/v1/api.go @@ -1,64 +1,70 @@ package v1 -import "github.com/metal-stack/metal-go/api/models" +import ( + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" +) -// Bootinfo is written by the installer in the target os to tell us -// which kernel, initrd and cmdline must be used for kexec -type Bootinfo struct { - Initrd string `yaml:"initrd"` - Cmdline string `yaml:"cmdline"` - Kernel string `yaml:"kernel"` - BootloaderID string `yaml:"bootloader_id"` -} +const ( + MachineDetailsPath = "/etc/metal/machine-details.yaml" + MachineAllocationPath = "/etc/metal/machine-allocation.yaml" + InstallerConfigPath = "/etc/metal/os-installer.yaml" + BuildMetaPath = "/etc/metal/build-meta.yaml" + BootInfoPath = "/etc/metal/boot-info.yaml" +) -// InstallerConfig contains configuration items which are -// used to install the os. -type InstallerConfig struct { - // Hostname of the machine - Hostname string `yaml:"hostname"` - // Networks all networks connected to this machine - Networks []*models.V1MachineNetwork `yaml:"networks"` - // MachineUUID is the unique UUID for this machine, usually the board serial. - MachineUUID string `yaml:"machineuuid"` - // SSHPublicKey of the user - SSHPublicKey string `yaml:"sshpublickey"` - // Password is the password for the metal user. - Password string `yaml:"password"` - // Console specifies where the kernel should connect its console to. - Console string `yaml:"console"` - // Timestamp is the the timestamp of installer config creation. 
- Timestamp string `yaml:"timestamp"` - // Nics are the network interfaces of this machine including their neighbors. - Nics []*models.V1MachineNic `yaml:"nics"` - // VPN is the config for connecting machine to VPN - VPN *models.V1MachineVPN `yaml:"vpn"` - // Role is either firewall or machine - Role string `yaml:"role"` - // RaidEnabled is set to true if any raid devices are specified - RaidEnabled bool `yaml:"raidenabled"` - // RootUUID is the fs uuid if the root fs - RootUUID string `yaml:"root_uuid"` - // FirewallRules if not empty firewall rules to enforce - FirewallRules *models.V1FirewallRules `yaml:"firewall_rules"` - // DNSServers for the machine - DNSServers []*models.V1DNSServer `yaml:"dns_servers"` - // NTPServers for the machine - NTPServers []*models.V1NTPServer `yaml:"ntp_servers"` -} +type ( + // Bootinfo is written by the installer in the target os to tell us + // which kernel, initrd and cmdline must be used for kexec + Bootinfo struct { + Initrd string `yaml:"initrd"` + Cmdline string `yaml:"cmdline"` + Kernel string `yaml:"kernel"` + BootloaderID string `yaml:"bootloader_id"` + } -// FIXME legacy structs remove once old images are gone + // Config can be placed inside the target OS to customize the os-installer. 
+ Config struct { + // OsName enforces a specific os-installer implementation, defaults to auto-detection + OsName *string `yaml:"os_name"` + // Only allows to run installer tasks only with the given names + Only []string `yaml:"only"` + // Except allows to run installer tasks except for the given names + Except []string `yaml:"except"` + // CustomScript allows executing a custom script that's placed inside the OS at the end of the installer execution + CustomScript *struct { + ExecutablePath string `yaml:"executable_path"` + WorkDir string `yaml:"workdir"` + } `yaml:"custom_script"` + // Overwrites allows specifying os-installer overwrites for the default implementation + Overwrites struct { + BootloaderID *string `yaml:"bootloader_id"` + } + } -type ( - // Disk is a physical Disk - Disk struct { - // Device the name of the disk device visible from kernel side, e.g. sda - Device string - // Partitions to create on this disk, order is preserved - Partitions []Partition + // MachineDetails which are not part of the MachineAllocation but required to complete the installation. + // Is written by the metal-hammer + MachineDetails struct { + // Id is the machine UUID + ID string `yaml:"id"` + // Nics are the nics of the machine + Nics []*apiv2.MachineNic `yaml:"nics"` + // Password is the password for the metal user. + Password string `yaml:"password"` + // Console specifies where the kernel should connect its console to. + Console string `yaml:"console"` + // RaidEnabled is set to true if any raid devices are specified + RaidEnabled bool `yaml:"raidenabled"` + // RootUUID is the fs uuid of the root fs + RootUUID string `yaml:"root_uuid"` + } - Partition struct { - Label string - Filesystem string - Properties map[string]string + + // BuildMeta is written after the installation finished to store details about the installation version. 
+ BuildMeta struct { + Version string `json:"buildVersion" yaml:"buildVersion"` + Date string `json:"buildDate" yaml:"buildDate"` + SHA string `json:"buildSHA" yaml:"buildSHA"` + Revision string `json:"buildRevision" yaml:"buildRevision"` + + IgnitionVersion string `json:"ignitionVersion" yaml:"ignitionVersion"` + } ) diff --git a/api/v1/build-meta.go b/api/v1/build-meta.go deleted file mode 100644 index f0e1105..0000000 --- a/api/v1/build-meta.go +++ /dev/null @@ -1,10 +0,0 @@ -package v1 - -type BuildMeta struct { - Version string `json:"buildVersion" yaml:"buildVersion"` - Date string `json:"buildDate" yaml:"buildDate"` - SHA string `json:"buildSHA" yaml:"buildSHA"` - Revision string `json:"buildRevision" yaml:"buildRevision"` - - IgnitionVersion string `json:"ignitionVersion" yaml:"ignitionVersion"` -} diff --git a/api/v1/legacy.go b/api/v1/legacy.go new file mode 100644 index 0000000..45d9c53 --- /dev/null +++ b/api/v1/legacy.go @@ -0,0 +1,166 @@ +package v1 + +const ( + LegacyInstallPath = "/etc/metal/install.yaml" +) + +type ( + + // InstallerConfig contains legacy configuration items which are + // used to install the os. + // It must be serialized to /etc/metal/install.yaml to guarantee compatibility for older + // firewall-controller and lldpd + InstallerConfig struct { + // Hostname of the machine + Hostname string `yaml:"hostname"` + // Networks all networks connected to this machine + Networks []*V1MachineNetwork `yaml:"networks"` + // MachineUUID is the unique UUID for this machine, usually the board serial. + MachineUUID string `yaml:"machineuuid"` + // SSHPublicKey of the user + SSHPublicKey string `yaml:"sshpublickey"` + // Password is the password for the metal user. + Password string `yaml:"password"` + // Console specifies where the kernel should connect its console to. + Console string `yaml:"console"` + // Timestamp is the timestamp of installer config creation. 
+ Timestamp string `yaml:"timestamp"` + // Nics are the network interfaces of this machine including their neighbors. + Nics []*V1MachineNic `yaml:"nics"` + // VPN is the config for connecting machine to VPN + VPN *V1MachineVPN `yaml:"vpn"` + // Role is either firewall or machine + Role string `yaml:"role"` + // RaidEnabled is set to true if any raid devices are specified + RaidEnabled bool `yaml:"raidenabled"` + // RootUUID is the fs uuid if the root fs + RootUUID string `yaml:"root_uuid"` + // FirewallRules if not empty firewall rules to enforce + FirewallRules *V1FirewallRules `yaml:"firewall_rules"` + // DNSServers for the machine + DNSServers []*V1DNSServer `yaml:"dns_servers"` + // NTPServers for the machine + NTPServers []*V1NTPServer `yaml:"ntp_servers"` + } + + // Copies of metal-go models.V1* structs in use in Installerconfig + // to prevent the import of metal-go. + + V1MachineNetwork struct { + // ASN number for this network in the bgp configuration + // Required: true + Asn *int64 `json:"asn" yaml:"asn"` + // the destination prefixes of this network + // Required: true + Destinationprefixes []string `json:"destinationprefixes" yaml:"destinationprefixes"` + // the ip addresses of the allocated machine in this vrf + // Required: true + Ips []string `json:"ips" yaml:"ips"` + // if set to true, packets leaving this network get masqueraded behind interface ip + // Required: true + Nat *bool `json:"nat" yaml:"nat"` + // nattypev2 + // Required: true + Nattypev2 *string `json:"nattypev2" yaml:"nattypev2"` + // the networkID of the allocated machine in this vrf + // Required: true + Networkid *string `json:"networkid" yaml:"networkid"` + // the network type, types can be looked up in the network package of metal-lib + // Required: true + Networktype *string `json:"networktype" yaml:"networktype"` + // networktypev2 + // Required: true + Networktypev2 *string `json:"networktypev2" yaml:"networktypev2"` + // the prefixes of this network + // Required: true + 
Prefixes []string `json:"prefixes" yaml:"prefixes"` + // indicates whether this network is the private network of this machine + // Required: true + Private *bool `json:"private" yaml:"private"` + // project of this network, empty string if not project scoped + // Required: true + Projectid *string `json:"projectid" yaml:"projectid"` + // if set to true, this network can be used for underlay communication + // Required: true + Underlay *bool `json:"underlay" yaml:"underlay"` + // the vrf of the allocated machine + // Required: true + Vrf *int64 `json:"vrf" yaml:"vrf"` + } + + V1MachineNic struct { + // the unique identifier of this network interface + // Required: true + Identifier *string `json:"identifier" yaml:"identifier"` + // the mac address of this network interface + // Required: true + Mac *string `json:"mac" yaml:"mac"` + // the name of this network interface + // Required: true + Name *string `json:"name" yaml:"name"` + // the neighbors visible to this network interface + // Required: true + Neighbors []*V1MachineNic `json:"neighbors" yaml:"neighbors"` + } + + V1MachineVPN struct { + // address of VPN control plane + // Required: true + Address *string `json:"address" yaml:"address"` + // auth key used to connect to VPN + // Required: true + AuthKey *string `json:"auth_key" yaml:"auth_key"` + // connected to the VPN + // Required: true + Connected *bool `json:"connected" yaml:"connected"` + } + + V1FirewallRules struct { + // list of egress rules to be deployed during firewall allocation + Egress []*V1FirewallEgressRule `json:"egress" yaml:"egress"` + // list of ingress rules to be deployed during firewall allocation + Ingress []*V1FirewallIngressRule `json:"ingress" yaml:"ingress"` + } + + V1FirewallEgressRule struct { + // an optional comment describing what this rule is used for + Comment string `json:"comment,omitempty" yaml:"comment,omitempty"` + // the ports affected by this rule + // Required: true + Ports []int32 `json:"ports" yaml:"ports"` + // 
the protocol for the rule, defaults to tcp + // Enum: ["tcp","udp"] + Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` + // the cidrs affected by this rule + // Required: true + To []string `json:"to" yaml:"to"` + } + + V1FirewallIngressRule struct { + // an optional comment describing what this rule is used for + Comment string `json:"comment,omitempty" yaml:"comment,omitempty"` + // the cidrs affected by this rule + // Required: true + From []string `json:"from" yaml:"from"` + // the ports affected by this rule + // Required: true + Ports []int32 `json:"ports" yaml:"ports"` + // the protocol for the rule, defaults to tcp + // Enum: ["tcp","udp"] + Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"` + // the cidrs affected by this rule + To []string `json:"to" yaml:"to"` + } + + V1DNSServer struct { + // ip address of this dns server + // Required: true + IP *string `json:"ip" yaml:"ip"` + } + + V1NTPServer struct { + // ip address or dns hostname of this ntp server + // Required: true + Address *string `json:"address" yaml:"address"` + } +) diff --git a/go.mod b/go.mod index 545529b..956ddcb 100644 --- a/go.mod +++ b/go.mod @@ -3,53 +3,49 @@ module github.com/metal-stack/os-installer go 1.26 require ( + buf.build/go/protoyaml v0.6.0 github.com/Masterminds/semver/v3 v3.4.0 + github.com/Masterminds/sprig/v3 v3.3.0 github.com/coreos/go-systemd/v22 v22.7.0 github.com/flatcar/ignition v0.36.2 github.com/google/go-cmp v0.7.0 - github.com/metal-stack/metal-go v0.43.0 - github.com/metal-stack/metal-lib v0.24.0 + github.com/google/uuid v1.6.0 + github.com/metal-stack/api v0.0.56 github.com/metal-stack/v v1.0.3 + github.com/samber/lo v1.53.0 github.com/spf13/afero v1.15.0 github.com/stretchr/testify v1.11.1 - gopkg.in/yaml.v3 v3.0.1 + go.yaml.in/yaml/v3 v3.0.4 ) require ( + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 // indirect + buf.build/go/protovalidate v1.1.3 // indirect + 
cel.dev/expr v0.25.1 // indirect + dario.cat/mergo v1.0.2 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect github.com/ajeddeloh/go-json v0.0.0-20160803184958-73d058cf8437 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/go-openapi/analysis v0.24.2 // indirect - github.com/go-openapi/errors v0.22.6 // indirect - github.com/go-openapi/jsonpointer v0.22.5 // indirect - github.com/go-openapi/jsonreference v0.21.5 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/spec v0.22.4 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.5 // indirect - github.com/go-openapi/swag/cmdutils v0.25.5 // indirect - github.com/go-openapi/swag/conv v0.25.5 // indirect - github.com/go-openapi/swag/fileutils v0.25.5 // indirect - github.com/go-openapi/swag/jsonname v0.25.5 // indirect - github.com/go-openapi/swag/jsonutils v0.25.5 // indirect - github.com/go-openapi/swag/loading v0.25.5 // indirect - github.com/go-openapi/swag/mangling v0.25.5 // indirect - github.com/go-openapi/swag/netutils v0.25.5 // indirect - github.com/go-openapi/swag/stringutils v0.25.5 // indirect - github.com/go-openapi/swag/typeutils v0.25.5 // indirect - github.com/go-openapi/swag/yamlutils v0.25.5 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/godbus/dbus/v5 v5.2.2 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/oklog/ulid v1.3.1 // indirect + github.com/google/cel-go v0.27.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.10.0 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect - go.mongodb.org/mongo-driver v1.17.9 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect go4.org v0.0.0-20260112195520-a5071408f32f // indirect - golang.org/x/net v0.51.0 // indirect - golang.org/x/sys v0.41.0 // indirect - golang.org/x/text v0.34.0 // indirect + golang.org/x/crypto v0.49.0 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect + golang.org/x/sys v0.42.0 // indirect + golang.org/x/text v0.35.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 58bedb8..ad808dc 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,26 @@ +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 h1:PMmTMyvHScV9Mn8wc6ASge9uRcHy0jtqPd+fM35LmsQ= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= +buf.build/go/protovalidate v1.1.3 h1:m2GVEgQWd7rk+vIoAZ+f0ygGjvQTuqPQapBBdcpWVPE= +buf.build/go/protovalidate v1.1.3/go.mod h1:9XIuohWz+kj+9JVn3WQneHA5LZP50mjvneZMnbLkiIE= +buf.build/go/protoyaml v0.6.0 h1:Nzz1lvcXF8YgNZXk+voPPwdU8FjDPTUV4ndNTXN0n2w= +buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod 
h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/ajeddeloh/go-json v0.0.0-20160803184958-73d058cf8437 h1:gZCtZ+Hh/e3CGEX8q/yAcp8wWu5ZS6NMk6VGzpQhI3s= github.com/ajeddeloh/go-json v0.0.0-20160803184958-73d058cf8437/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/aws/aws-sdk-go v1.8.39/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= +github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= github.com/coreos/go-semver v0.1.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -16,84 +34,56 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/flatcar/ignition v0.36.2 h1:xGHgScUe0P4Fkprjqv7L2CE58emiQgP833OCCn9z2v4= github.com/flatcar/ignition v0.36.2/go.mod h1:uk1tpzLFRXus4RrvzgMI+IqmmB8a/RGFSBlI+tMTbbA= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-openapi/analysis v0.24.2 h1:6p7WXEuKy1llDgOH8FooVeO+Uq2za9qoAOq4ZN08B50= -github.com/go-openapi/analysis v0.24.2/go.mod h1:x27OOHKANE0lutg2ml4kzYLoHGMKgRm1Cj2ijVOjJuE= -github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo= -github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA= -github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0= -github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE= -github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ= -github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU= -github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA= -github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c= -github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g= -github.com/go-openapi/swag/conv 
v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k= -github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk= -github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc= -github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo= -github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU= -github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo= -github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo= -github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU= -github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g= -github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw= -github.com/go-openapi/swag/mangling v0.25.5/go.mod h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY= -github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU= -github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14= -github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M= -github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII= -github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E= -github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc= -github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ= -github.com/go-openapi/swag/yamlutils v0.25.5/go.mod 
h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ= -github.com/go-openapi/testify/enable/yaml/v2 v2.4.0 h1:7SgOMTvJkM8yWrQlU8Jm18VeDPuAvB/xWrdxFJkoFag= -github.com/go-openapi/testify/enable/yaml/v2 v2.4.0/go.mod h1:14iV8jyyQlinc9StD7w1xVPW3CO3q1Gj04Jy//Kw4VM= -github.com/go-openapi/testify/v2 v2.4.0 h1:8nsPrHVCWkQ4p8h1EsRVymA2XABB4OT40gcvAu+voFM= -github.com/go-openapi/testify/v2 v2.4.0/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= -github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ= github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/metal-stack/metal-go v0.43.0 h1:uODD0YCwnAYzyvFxWNakZrymBoMz1FAvP5hkhsR83VQ= -github.com/metal-stack/metal-go v0.43.0/go.mod h1:GSfXrAj55LGsUSMHWGDsmq5n056NG0yb1JM8bgfvKOw= -github.com/metal-stack/metal-lib v0.24.0 h1:wvQQPWIXcA2tP+I6zAHUNdtVLLJfQnnV9yG2SoqUkz4= -github.com/metal-stack/metal-lib v0.24.0/go.mod h1:oITaqj/BtB9vDKM66jCXkeA+4D0eTZElgIKal5vtiNY= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/metal-stack/api v0.0.56 h1:wrW2zUKAOQd2qsRMyEg4Km7jkI688OZGzqas9agxMro= +github.com/metal-stack/api v0.0.56/go.mod h1:hEgtKVD7UnUwUExdA7pbFvVRxNRxSGUnU+bZce46//c= github.com/metal-stack/v v1.0.3 h1:Sh2oBlnxrCUD+mVpzfC8HiqL045YWkxs0gpTvkjppqs= github.com/metal-stack/v v1.0.3/go.mod h1:YTahEu7/ishwpYKnp/VaW/7nf8+PInogkfGwLcGPdXg= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8= +github.com/rodaine/protogofakeit v0.1.1/go.mod h1:pXn/AstBYMaSfc1/RqH3N82pBuxtWgejz1AlYpY1mI0= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/samber/lo v1.53.0 h1:t975lj2py4kJPQ6haz1QMgtId2gtmfktACxIXArw3HM= +github.com/samber/lo v1.53.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sigma/bdoor v0.0.0-20160202064022-babf2a4017b0/go.mod h1:WBu7REWbxC/s/J06jsk//d+9DOz9BbsmcIrimuGRFbs= github.com/sigma/vmw-guestinfo v0.0.0-20160204083807-95dd4126d6e8/go.mod h1:JrRFFC0veyh0cibh0DAhriSY7/gV3kDdNaVUOmfx01U= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= @@ -102,29 +92,36 @@ github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8A github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= -go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU= -go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go4.org v0.0.0-20160314031811-03efcb870d84/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20260112195520-a5071408f32f h1:ziUVAjmTPwQMBmYR1tbdRFJPtTcQUI12fH9QQjfb0Sw= go4.org v0.0.0-20260112195520-a5071408f32f/go.mod h1:ZRJnO5ZI4zAwMFp+dS1+V6J6MSyAowhRqAE+DPa1Xp0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5 h1:CogIeEXn4qWYzzQU0QqvYBM8yDF9cFYzDq9ojSpv0Js= +google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5 h1:aJmi6DVGGIStN9Mobk/tZOOQUBbj0BPjZjjnOdoZKts= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/install.go b/install.go deleted file mode 100644 index 5362773..0000000 --- a/install.go +++ /dev/null @@ -1,888 +0,0 @@ -package main - -import ( - "fmt" - "io/fs" - "log/slog" - "os" - "os/exec" - "os/user" - "path" - "strconv" - "strings" - "time" - - ignitionConfig "github.com/flatcar/ignition/config/v2_4" - "github.com/metal-stack/metal-go/api/models" - v1 "github.com/metal-stack/os-installer/api/v1" - "github.com/metal-stack/os-installer/pkg/network" - "github.com/metal-stack/os-installer/templates" - "github.com/metal-stack/v" - "github.com/spf13/afero" - "gopkg.in/yaml.v3" -) - -const ( - installYAML = "/etc/metal/install.yaml" - userdata = "/etc/metal/userdata" -) - -func runFromCI() bool { - ciEnv := os.Getenv("INSTALL_FROM_CI") - - ci, err := strconv.ParseBool(ciEnv) - if err != nil { - return false - } - - return ci -} - -type installer struct { - log *slog.Logger - fs afero.Fs - oss operatingsystem - config *v1.InstallerConfig - exec *cmdexec -} - -func Install(log *slog.Logger, config *v1.InstallerConfig) error { - start := time.Now() - fs := afero.OsFs{} - - oss, err := detectOS(fs) - if err != nil { - return fmt.Errorf("os detection failed %w", err) - } - - i := installer{ - log: log.WithGroup("os-installer"), - fs: fs, - oss: oss, - config: config, - exec: &cmdexec{ - log: log.WithGroup("cmdexec"), - c: exec.CommandContext, - }, - } - - err = i.do() - if err != nil { - return fmt.Errorf("installation failed duration %s %w", time.Since(start).String(), err) - } - 
i.log.Info("installation succeeded", "duration", time.Since(start).String()) - return nil -} - -func (i *installer) do() error { - err := i.detectFirmware() - if err != nil { - i.log.Warn("no efi detected", "error", err) - return err - } - - if !i.fileExists(installYAML) { - return fmt.Errorf("no install.yaml found") - } - - // remove .dockerenv, otherwise systemd-detect-virt guesses docker which modifies the behavior of many services. - if i.fileExists("/.dockerenv") { - err := os.Remove("/.dockerenv") - if err != nil { - return fmt.Errorf("unable to delete .dockerenv") - } - } - - err = i.writeResolvConf() - if err != nil { - i.log.Warn("writing resolv.conf failed", "error", err) - return err - } - - err = i.writeNTPConf() - if err != nil { - i.log.Warn("writing ntp configuration failed", "err", err) - return err - } - - err = i.createMetalUser() - if err != nil { - return err - } - err = i.configureNetwork() - if err != nil { - return err - } - - err = i.copySSHKeys() - if err != nil { - return err - } - - err = i.fixPermissions() - if err != nil { - return err - } - - err = i.processUserdata() - if err != nil { - return err - } - - cmdLine := i.buildCMDLine() - - err = i.writeBootInfo(cmdLine) - if err != nil { - return err - } - - err = i.grubInstall(cmdLine) - if err != nil { - return err - } - - err = i.unsetMachineID() - if err != nil { - return err - } - - err = i.writeBuildMeta() - if err != nil { - return err - } - - return nil -} - -func (i *installer) detectFirmware() error { - i.log.Info("detect firmware") - - if !i.isVirtual() && !i.fileExists("/sys/firmware/efi") { - return fmt.Errorf("not running efi mode") - } - return nil -} - -func (i *installer) isVirtual() bool { - return !i.fileExists("/sys/class/dmi") -} - -func (i *installer) unsetMachineID() error { - i.log.Info("unset machine-id") - for _, p := range []string{"/etc/machine-id", "/var/lib/dbus/machine-id"} { - if !i.fileExists(p) { - continue - } - f, err := i.fs.Create(p) - if err != nil 
{ - return err - } - _ = f.Close() - } - return nil -} - -func (i *installer) fileExists(filename string) bool { - info, err := i.fs.Stat(filename) - if os.IsNotExist(err) { - return false - } - return !info.IsDir() -} - -func (i *installer) writeResolvConf() error { - const f = "/etc/resolv.conf" - i.log.Info("write configuration", "file", f) - // Must be written here because during docker build this file is synthetic - err := i.fs.Remove(f) - if err != nil { - i.log.Info("config file not present", "file", f) - } - - content := []byte( - `nameserver 8.8.8.8 -nameserver 8.8.4.4 -`) - - if len(i.config.DNSServers) > 0 { - var s strings.Builder - for _, dnsServer := range i.config.DNSServers { - s.WriteString("nameserver " + *dnsServer.IP + "\n") - } - content = []byte(s.String()) - - } - - return afero.WriteFile(i.fs, f, content, 0644) -} - -func (i *installer) writeNTPConf() error { - if len(i.config.NTPServers) == 0 { - return nil - } - - var ( - ntpConfigPath string - s string - err error - ) - - switch i.config.Role { - case models.V1MachineAllocationRoleFirewall: - ntpConfigPath = "/etc/chrony/chrony.conf" - s, err = templates.RenderChronyTemplate(templates.Chrony{NTPServers: i.config.NTPServers}) - if err != nil { - return fmt.Errorf("error rendering chrony template %w", err) - } - - case models.V1MachineAllocationRoleMachine: - if i.oss == osDebian || i.oss == osUbuntu { - ntpConfigPath = "/etc/systemd/timesyncd.conf" - var addresses []string - for _, ntp := range i.config.NTPServers { - if ntp.Address == nil { - continue - } - addresses = append(addresses, *ntp.Address) - } - s = fmt.Sprintf("[Time]\nNTP=%s\n", strings.Join(addresses, " ")) - } - - if i.oss == osAlmalinux { - ntpConfigPath = "/etc/chrony.conf" - s, err = templates.RenderChronyTemplate(templates.Chrony{NTPServers: i.config.NTPServers}) - if err != nil { - return fmt.Errorf("error rendering chrony template %w", err) - } - } - default: - return fmt.Errorf("unknown role:%s", i.config.Role) - } - 
- content := []byte(s) - i.log.Info("write configuration", "file", ntpConfigPath) - err = i.fs.Remove(ntpConfigPath) - if err != nil { - i.log.Info("config file not present", "file", ntpConfigPath) - } - - return afero.WriteFile(i.fs, ntpConfigPath, content, 0644) -} - -func (i *installer) buildCMDLine() string { - i.log.Info("build kernel cmdline") - - rootUUID := i.config.RootUUID - - parts := []string{ - fmt.Sprintf("console=%s", i.config.Console), - fmt.Sprintf("root=UUID=%s", rootUUID), - "init=/sbin/init", - "net.ifnames=0", - "biosdevname=0", - "nvme_core.io_timeout=300", // 300 sec should be enough for firewalls to be replaced - } - - mdUUID, found := i.findMDUUID() - if found { - mdParts := []string{ - "rdloaddriver=raid0", - "rdloaddriver=raid1", - fmt.Sprintf("rd.md.uuid=%s", mdUUID), - } - parts = append(parts, mdParts...) - } - - return strings.Join(parts, " ") -} - -func (i *installer) findMDUUID() (mdUUID string, found bool) { - i.log.Info("detect software raid uuid") - if !i.config.RaidEnabled { - return "", false - } - - blkidOut, err := i.exec.command(&cmdParams{ - name: "blkid", - timeout: 10 * time.Second, - }) - if err != nil { - i.log.Error("unable to run blkid", "error", err) - return "", false - } - rootUUID := i.config.RootUUID - - var rootDisk string - for line := range strings.SplitSeq(string(blkidOut), "\n") { - if strings.Contains(line, rootUUID) { - rd, _, found := strings.Cut(line, ":") - if found { - rootDisk = strings.TrimSpace(rd) - break - } - } - } - if rootDisk == "" { - i.log.Error("unable to detect rootdisk") - return "", false - } - - mdadmOut, err := i.exec.command(&cmdParams{ - name: "mdadm", - args: []string{"--detail", "--export", rootDisk}, - timeout: 10 * time.Second, - }) - if err != nil { - i.log.Error("unable to run mdadm", "error", err) - return "", false - } - - for line := range strings.SplitSeq(string(mdadmOut), "\n") { - _, md, found := strings.Cut(line, "MD_UUID=") - if found { - mdUUID = md - break - } - } - - 
if mdUUID == "" { - i.log.Error("unable to detect md root disk") - return "", false - } - - return mdUUID, true -} - -func (i *installer) createMetalUser() error { - i.log.Info("create user", "user", "metal") - - u, err := user.Lookup("metal") - if err != nil { - if err.Error() != user.UnknownUserError("metal").Error() { - return err - } - } - if u != nil { - i.log.Info("user already exists, recreating") - _, err = i.exec.command(&cmdParams{ - name: "userdel", - args: []string{"metal"}, - timeout: 10 * time.Second, - }) - if err != nil { - return err - } - } - - _, err = i.exec.command(&cmdParams{ - name: "useradd", - args: []string{"--create-home", "--uid", "1000", "--gid", i.oss.SudoGroup(), "--shell", "/bin/bash", "metal"}, - timeout: 10 * time.Second, - }) - if err != nil { - return err - } - - _, err = i.exec.command(&cmdParams{ - name: "passwd", - args: []string{"metal"}, - timeout: 10 * time.Second, - stdin: i.config.Password + "\n" + i.config.Password + "\n", - }) - if err != nil { - return err - } - - if i.oss == osAlmalinux { - // otherwise in rescue mode the root account is locked - _, err = i.exec.command(&cmdParams{ - name: "passwd", - args: []string{"root"}, - timeout: 10 * time.Second, - stdin: i.config.Password + "\n" + i.config.Password + "\n", - }) - if err != nil { - return err - } - } - - return nil -} - -func (i *installer) configureNetwork() error { - i.log.Info("configure network") - kb, err := network.New(i.log.WithGroup("networker"), installYAML) - if err != nil { - return err - } - - var kind network.BareMetalType - switch i.config.Role { - case models.V1MachineAllocationRoleFirewall: - kind = network.Firewall - case models.V1MachineAllocationRoleMachine: - kind = network.Machine - default: - return fmt.Errorf("unknown role:%s", i.config.Role) - } - - err = kb.Validate(kind) - if err != nil { - return err - } - - c, err := network.NewConfigurator(kind, *kb, false) - if err != nil { - return err - } - c.Configure(network.ForwardPolicyDrop) 
- return nil -} - -func (i *installer) copySSHKeys() error { - i.log.Info("copy ssh keys") - err := i.fs.MkdirAll("/home/metal/.ssh", 0700) - if err != nil { - return err - } - - u, err := user.Lookup("metal") - if err != nil { - return err - } - - uid, err := strconv.Atoi(u.Uid) - if err != nil { - return err - } - gid, err := strconv.Atoi(u.Gid) - if err != nil { - return err - } - - err = i.fs.Chown("/home/metal/.ssh", uid, gid) - if err != nil { - return err - } - - err = afero.WriteFile(i.fs, "/home/metal/.ssh/authorized_keys", []byte(i.config.SSHPublicKey), 0600) - if err != nil { - return err - } - return i.fs.Chown("/home/metal/.ssh/authorized_keys", uid, gid) -} - -func (i *installer) fixPermissions() error { - i.log.Info("fix permissions") - for p, perm := range map[string]fs.FileMode{ - "/var/tmp": 01777, - "/etc/hosts": 0644, - } { - err := i.fs.Chmod(p, perm) - if err != nil { - return err - } - } - - return nil -} - -func (i *installer) processUserdata() error { - i.log.Info("process userdata") - if ok := i.fileExists(userdata); !ok { - i.log.Info("no userdata present, not processing userdata", "path", userdata) - return nil - } - - content, err := afero.ReadFile(i.fs, userdata) - if err != nil { - return err - } - - defer func() { - out, err := i.exec.command(&cmdParams{ - name: "systemctl", - args: []string{"preset-all"}, - }) - if err != nil { - i.log.Error("error when running systemctl preset-all, continuing anyway", "error", err, "output", string(out)) - } - }() - - if isCloudInitFile(content) { - _, err := i.exec.command(&cmdParams{ - name: "cloud-init", - args: []string{"devel", "schema", "--config-file", userdata}, - }) - if err != nil { - i.log.Error("error when running cloud-init userdata, continuing anyway", "error", err) - } - - return nil - } - - err = i.fs.Rename(userdata, "/etc/metal/config.ign") - if err != nil { - return err - } - - rawConfig, err := afero.ReadFile(i.fs, "/etc/metal/config.ign") - if err != nil { - return err - } - _, 
report, err := ignitionConfig.Parse(rawConfig) - if err != nil { - i.log.Error("error when validating ignition userdata, continuing anyway", "error", err) - } - - i.log.Info("executing ignition") - _, err = i.exec.command(&cmdParams{ - name: "ignition", - args: []string{"-oem", "file", "-stage", "files", "-log-to-stdout"}, - dir: "/etc/metal", - }) - if err != nil { - i.log.Error("error when running ignition, continuing anyway", "report", report.Entries, "error", err) - } - - return nil -} - -func isCloudInitFile(content []byte) bool { - for i, line := range strings.Split(string(content), "\n") { - if strings.Contains(line, "#cloud-config") { - return true - } - if i > 1 { - return false - } - } - return false -} - -func (i *installer) writeBootInfo(cmdLine string) error { - i.log.Info("write boot-info") - - kern, initrd, err := i.kernelAndInitrdPath() - if err != nil { - return err - } - - content, err := yaml.Marshal(v1.Bootinfo{ - Initrd: initrd, - Cmdline: cmdLine, - Kernel: kern, - BootloaderID: i.oss.BootloaderID(), - }) - if err != nil { - return fmt.Errorf("unable to write boot-info.yaml %w", err) - } - - return afero.WriteFile(i.fs, "/etc/metal/boot-info.yaml", content, 0700) -} - -func (i *installer) kernelAndInitrdPath() (kern string, initrd string, err error) { - // Debian 10 - // root@1f223b59051bcb12:/boot# ls -l - // total 83500 - // -rw-r--r-- 1 root root 83 Aug 13 15:25 System.map-5.10.0-17-amd64 - // -rw-r--r-- 1 root root 236286 Aug 13 15:25 config-5.10.0-17-amd64 - // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 - // drwxr-xr-x 2 root root 4096 Oct 3 11:21 grub - // -rw-r--r-- 1 root root 34665690 Oct 3 11:22 initrd.img-5.10.0-17-amd64 - // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 - // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 - // -rw-r--r-- 1 root root 6962816 Aug 13 15:25 vmlinuz-5.10.0-17-amd64 - - // Ubuntu 20.04 - // root@568551f94559b121:~# ls -l /boot/ - // total 83500 - // 
-rw-r--r-- 1 root root 83 Aug 13 15:25 System.map-5.10.0-17-amd64 - // -rw-r--r-- 1 root root 236286 Aug 13 15:25 config-5.10.0-17-amd64 - // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 - // drwxr-xr-x 2 root root 4096 Oct 3 11:21 grub - // -rw-r--r-- 1 root root 34665690 Oct 3 11:22 initrd.img-5.10.0-17-amd64 - // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 - // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 - // -rw-r--r-- 1 root root 6962816 Aug 13 15:25 vmlinuz-5.10.0-17-amd64 - - // Almalinux 9 - // [root@14231d4e67d28390 ~]# ls -l /boot/ - // total 160420 - // -rw------- 1 root root 8876661 Jan 7 23:19 System.map-5.14.0-503.19.1.el9_5.x86_64 - // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 - // -rw-r--r-- 1 root root 226249 Jan 7 23:19 config-5.14.0-503.19.1.el9_5.x86_64 - // drwx------ 3 root root 4096 Jun 8 2022 efi - // drwx------ 3 root root 4096 Jan 9 08:02 grub2 - // -rw------- 1 root root 97054329 Jan 9 08:04 initramfs-5.14.0-503.19.1.el9_5.x86_64.img - // drwxr-xr-x 3 root root 4096 Jan 9 08:02 loader - // lrwxrwxrwx 1 root root 52 Jan 9 08:03 symvers-5.14.0-503.19.1.el9_5.x86_64.gz -> /lib/modules/5.14.0-503.19.1.el9_5.x86_64/symvers.gz - // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 - // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 - // -rwxr-xr-x 1 root root 14467384 Jan 7 23:19 vmlinuz-5.14.0-503.19.1.el9_5.x86_64 - - var ( - bootPartition = "/boot" - systemMapPrefix = "/boot/System.map-" - ) - - systemMaps, err := afero.Glob(i.fs, systemMapPrefix+"*") - if err != nil { - return "", "", fmt.Errorf("unable to find a System.map, probably no kernel installed %w", err) - } - if len(systemMaps) != 1 { - return "", "", fmt.Errorf("more or less than a single System.map found(%v), probably no kernel or more than one kernel installed", systemMaps) - } - - systemMap := systemMaps[0] - _, kernelVersion, found := strings.Cut(systemMap, systemMapPrefix) - 
if !found { - return "", "", fmt.Errorf("unable to detect kernel version in System.map :%q", systemMap) - } - - kern = path.Join(bootPartition, "vmlinuz"+"-"+kernelVersion) - if !i.fileExists(kern) { - return "", "", fmt.Errorf("kernel image %q not found", kern) - } - initrd = path.Join(bootPartition, i.oss.Initramdisk(kernelVersion)) - if !i.fileExists(initrd) { - return "", "", fmt.Errorf("ramdisk %q not found", initrd) - } - - i.log.Info("detect kernel and initrd", "kernel", kern, "initrd", initrd) - - return -} - -func (i *installer) grubInstall(cmdLine string) error { - i.log.Info("install grub") - // ttyS1,115200n8 - serialPort, serialSpeed, found := strings.Cut(i.config.Console, ",") - if !found { - return fmt.Errorf("serial console could not be split into port and speed") - } - - _, serialPort, found = strings.Cut(serialPort, "ttyS") - if !found { - return fmt.Errorf("serial port could not be split") - } - - serialSpeed, _, found = strings.Cut(serialSpeed, "n8") - if !found { - return fmt.Errorf("serial speed could not be split") - } - - defaultGrub := fmt.Sprintf(`GRUB_DEFAULT=0 -GRUB_TIMEOUT=5 -GRUB_DISTRIBUTOR=%s -GRUB_CMDLINE_LINUX_DEFAULT="" -GRUB_CMDLINE_LINUX="%s" -GRUB_TERMINAL=serial -GRUB_SERIAL_COMMAND="serial --speed=%s --unit=%s --word=8" -`, i.oss.BootloaderID(), cmdLine, serialSpeed, serialPort) - - if i.oss == osAlmalinux { - defaultGrub += fmt.Sprintf("GRUB_DEVICE=UUID=%s\n", i.config.RootUUID) - defaultGrub += "GRUB_ENABLE_BLSCFG=false\n" - } - - err := afero.WriteFile(i.fs, "/etc/default/grub", []byte(defaultGrub), 0755) - if err != nil { - return err - } - - grubInstallArgs := []string{ - "--target=x86_64-efi", - "--efi-directory=/boot/efi", - "--boot-directory=/boot", - "--bootloader-id=" + i.oss.BootloaderID(), - } - if i.config.RaidEnabled { - grubInstallArgs = append(grubInstallArgs, "--no-nvram") - } - - if i.oss == osAlmalinux { - path := "/boot/grub2/grub.cfg" - if i.oss == osAlmalinux { - path = "/boot/efi/EFI/almalinux/grub.cfg" 
- } - _, err := i.exec.command(&cmdParams{ - name: "grub2-mkconfig", - args: []string{"-o", path}, - }) - if err != nil { - return err - } - - grubInstallArgs = append(grubInstallArgs, fmt.Sprintf("UUID=%s", i.config.RootUUID)) - } else { - grubInstallArgs = append(grubInstallArgs, "--removable") - } - - if i.config.RaidEnabled { - out, err := i.exec.command(&cmdParams{ - name: "mdadm", - args: []string{"--examine", "--scan"}, - timeout: 10 * time.Second, - }) - if err != nil { - return err - } - - out += "\nMAILADDR root\n" - - err = afero.WriteFile(i.fs, "/etc/mdadm.conf", []byte(out), 0700) - if err != nil { - return err - } - - if i.oss.NeedUpdateInitRamfs() { - err = i.fs.MkdirAll("/var/lib/initramfs-tools", 0755) - if err != nil { - return err - } - - _, err = i.exec.command(&cmdParams{ - name: "update-initramfs", - args: []string{"-u"}, - }) - if err != nil { - return err - } - } - - out, err = i.exec.command(&cmdParams{ - name: "blkid", - }) - if err != nil { - return err - } - - for line := range strings.SplitSeq(string(out), "\n") { - if strings.Contains(line, `PARTLABEL="efi"`) { - disk, _, found := strings.Cut(line, ":") - if !found { - return fmt.Errorf("unable to process blkid output lines") - } - shim := fmt.Sprintf(`\\EFI\\%s\\grubx64.efi`, i.oss.BootloaderID()) - if i.oss == osAlmalinux { - shim = fmt.Sprintf(`\\EFI\\%s\\shimx64.efi`, i.oss.BootloaderID()) - } - - _, err = i.exec.command(&cmdParams{ - name: "efibootmgr", - args: []string{"-c", "-d", disk, "-p1", "-l", shim, "-L", i.oss.BootloaderID()}, - }) - if err != nil { - return err - } - } - } - } - - if i.oss.GrubInstallCmd() != "" && !runFromCI() { - _, err = i.exec.command(&cmdParams{ - name: i.oss.GrubInstallCmd(), - args: grubInstallArgs, - }) - if err != nil { - return err - } - } - - if i.oss == osAlmalinux { - if !i.config.RaidEnabled { - return nil - } - - v, err := i.getKernelVersion() - if err != nil { - return err - } - - _, err = i.exec.command(&cmdParams{ - name: "dracut", - 
args: []string{ - "--mdadmconf", - "--kver", v, - "--kmoddir", "/lib/modules/" + v, - "--include", "/lib/modules/" + v, "/lib/modules/" + v, - "--fstab", - "--add=dm mdraid", - "--add-drivers=raid0 raid1", - "--hostonly", - "--force", - }, - }) - if err != nil { - return err - } - - return nil - } - - _, err = i.exec.command(&cmdParams{ - name: "update-grub2", - }) - if err != nil { - return err - } - - _, err = i.exec.command(&cmdParams{ - name: "dpkg-reconfigure", - args: []string{"grub-efi-amd64-bin"}, - env: []string{ - "DEBCONF_NONINTERACTIVE_SEEN=true", - "DEBIAN_FRONTEND=noninteractive", - }, - }) - if err != nil { - return err - } - - return nil -} - -func (i *installer) writeBuildMeta() error { - i.log.Info("writing build meta file", "path", "/etc/metal/build-meta.yaml") - - meta := &v1.BuildMeta{ - Version: v.Version, - Date: v.BuildDate, - SHA: v.GitSHA1, - Revision: v.Revision, - } - - out, err := i.exec.command(&cmdParams{ - name: "ignition", - args: []string{"-version"}, - }) - if err != nil { - i.log.Error("error detecting ignition version for build meta, continuing anyway", "error", err) - } else { - meta.IgnitionVersion = strings.TrimSpace(out) - } - - content, err := yaml.Marshal(meta) - if err != nil { - return err - } - - content = append([]byte("---\n"), content...) 
- - return afero.WriteFile(i.fs, "/etc/metal/build-meta.yaml", content, 0644) -} - -func (i *installer) getKernelVersion() (string, error) { - kern, _, err := i.kernelAndInitrdPath() - if err != nil { - return "", err - } - - _, version, found := strings.Cut(kern, "vmlinuz-") - if !found { - return "", fmt.Errorf("unable to determine kernel version from: %s", kern) - } - - return version, nil -} diff --git a/install_test.go b/install_test.go deleted file mode 100644 index 4a49e67..0000000 --- a/install_test.go +++ /dev/null @@ -1,1078 +0,0 @@ -package main - -import ( - "fmt" - "io/fs" - "log/slog" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/metal-stack/metal-go/api/models" - v1 "github.com/metal-stack/os-installer/api/v1" - "github.com/metal-stack/v" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" -) - -const ( - sampleInstallYAML = `--- -hostname: test-machine -networks: -- asn: 4210000000 - destinationprefixes: [] - ips: - - 192.168.0.1 - nat: false - networkid: 931b1568-9f2b-4b83-8bcb-cfc8f2a99e85 - networktype: privateprimaryshared - prefixes: - - 192.168.0.0/24 - private: true - underlay: false - vrf: 1 -- asn: 4210000000 - destinationprefixes: - - 0.0.0.0/0 - ips: - - 192.168.1.1 - nat: true - networkid: internet - networktype: external - prefixes: - - 192.168.1.0/24 - private: false - underlay: false - vrf: 104009 -machineuuid: c647818b-0573-45a1-bac4-e311db1df753 -sshpublickey: ssh-ed25519 key -password: a-password -devmode: false -console: ttyS1,115200n8 -raidenabled: false -root_uuid: "543eb7f8-98d4-d986-e669-824dbebe69e5" -timestamp: "2022-02-24T14:54:58Z" -nics: -- mac: b4:96:91:cb:64:e0 - name: eth4 - neighbors: - - mac: b8:6a:97:73:f8:5f - name: null - neighbors: [] -- mac: b4:96:91:cb:64:e1 - name: eth5 - neighbors: - - mac: b8:6a:97:74:00:5f - name: null - neighbors: []` - sampleInstallWithRaidYAML = `--- -hostname: test-machine -networks: -- asn: 
4210000000 - destinationprefixes: [] - ips: - - 192.168.0.1 - nat: false - networkid: 931b1568-9f2b-4b83-8bcb-cfc8f2a99e85 - networktype: privateprimaryshared - prefixes: - - 192.168.0.0/24 - private: true - underlay: false - vrf: 1 -- asn: 4210000000 - destinationprefixes: - - 0.0.0.0/0 - ips: - - 192.168.1.1 - nat: true - networkid: internet - networktype: external - prefixes: - - 192.168.1.0/24 - private: false - underlay: false - vrf: 104009 -machineuuid: c647818b-0573-45a1-bac4-e311db1df753 -sshpublickey: ssh-ed25519 key -password: a-password -devmode: false -console: ttyS1,115200n8 -raidenabled: true -root_uuid: "ace079b5-06be-4429-bbf0-081ea4d7d0d9" -timestamp: "2022-02-24T14:54:58Z" -nics: -- mac: b4:96:91:cb:64:e0 - name: eth4 - neighbors: - - mac: b8:6a:97:73:f8:5f - name: null - neighbors: [] -- mac: b4:96:91:cb:64:e1 - name: eth5 - neighbors: - - mac: b8:6a:97:74:00:5f - name: null - neighbors: []` - sampleBlkidOutput = `/dev/sda1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="cc57c456-0b2f-6345-c597-d861cc6dd8ac" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="273985c8-d097-4123-bcd0-80b4e4e14728" -/dev/sda2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="54748c60-b566-f391-142c-fb78bb1fc6a9" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="d7863f4e-af7c-47fc-8c03-6ecdc69bc72d" -/dev/sda3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="582e9b4f-f191-e01e-85fd-2f7d969fbef6" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="e8b44f09-b7f7-4e0d-a7c3-d909617d1f05" -/dev/sdb1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="61bd5d8b-1bb8-673b-9e61-8c28dccc3812" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="13a4c568-57b0-4259-9927-9ac023aaa5f0" -/dev/sdb2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="e7d01e93-9340-5b90-68f8-d8f815595132" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="ab11cd86-37b8-4bae-81e5-21fe0a9c9ae0" -/dev/sdb3: 
UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="764217ad-1591-a83a-c799-23397f968729" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="9afbf9c1-b2ba-4b46-8db1-e802d26c93b6" -/dev/md1: LABEL="root" UUID="ace079b5-06be-4429-bbf0-081ea4d7d0d9" TYPE="ext4" -/dev/md0: LABEL="efi" UUID="C236-297F" TYPE="vfat" -/dev/md2: LABEL="varlib" UUID="385e8e8e-dbfd-481e-93a4-cba7f4d5fa02" TYPE="ext4"` - sampleMdadmDetailOutput = `MD_LEVEL=raid1 -MD_DEVICES=2 -MD_METADATA=1.0 -MD_UUID=543eb7f8:98d4d986:e669824d:bebe69e5 -MD_DEVNAME=1 -MD_NAME=any:1 -MD_DEVICE_dev_sdb2_ROLE=1 -MD_DEVICE_dev_sdb2_DEV=/dev/sdb2 -MD_DEVICE_dev_sda2_ROLE=0 -MD_DEVICE_dev_sda2_DEV=/dev/sda2` - sampleMdadmScanOutput = `ARRAY /dev/md/0 metadata=1.0 UUID=42d10089:ee1e0399:445e7550:62b63ec8 name=any:0 -ARRAY /dev/md/1 metadata=1.0 UUID=543eb7f8:98d4d986:e669824d:bebe69e5 name=any:1 -ARRAY /dev/md/2 metadata=1.0 UUID=fc32a6f0:ee40d9db:87c8c9f3:a8400c8b name=any:2` - sampleCloudInit = `#cloud-config -# Add groups to the system -# The following example adds the ubuntu group with members 'root' and 'sys' -# and the empty group cloud-users. 
-groups: - - admingroup: [root,sys] - - cloud-users` - sampleIgnition = `{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"}}` -) - -func mustParseInstallYAML(t *testing.T, fs afero.Fs) *v1.InstallerConfig { - config, err := parseInstallYAML(fs) - require.NoError(t, err) - return config -} - -func Test_installer_detectFirmware(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - execMocks []fakeexecparams - wantErr error - }{ - { - name: "is efi", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/sys/firmware/efi", []byte(""), 0755)) - require.NoError(t, afero.WriteFile(fs, "/sys/class/dmi", []byte(""), 0755)) - }, - wantErr: nil, - }, - { - name: "is not efi", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/sys/class/dmi", []byte(""), 0755)) - }, - wantErr: fmt.Errorf("not running efi mode"), - }, - { - name: "is not efi but virtual", - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - log := slog.Default() - - i := &installer{ - log: log, - fs: afero.NewMemMapFs(), - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - } - - if tt.fsMocks != nil { - tt.fsMocks(i.fs) - } - - err := i.detectFirmware() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_writeResolvConf(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - config *v1.InstallerConfig - want string - wantErr error - }{ - { - name: "resolv.conf gets written", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/resolv.conf", []byte(""), 0755)) - }, - want: `nameserver 8.8.8.8 -nameserver 8.8.4.4 -`, - wantErr: nil, - }, - { - name: "resolv.conf gets written, file is not present", - want: `nameserver 8.8.8.8 -nameserver 8.8.4.4 -`, - wantErr: nil, - }, - { - name: "overwrite 
resolv.conf with custom DNS", - config: &v1.InstallerConfig{DNSServers: []*models.V1DNSServer{{IP: new("1.2.3.4")}, {IP: new("5.6.7.8")}}}, - want: `nameserver 1.2.3.4 -nameserver 5.6.7.8 -`, - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - i := &installer{ - log: slog.Default(), - fs: afero.NewMemMapFs(), - config: &v1.InstallerConfig{}, - } - - if tt.fsMocks != nil { - tt.fsMocks(i.fs) - } - - if tt.config != nil { - i.config = tt.config - } - - err := i.writeResolvConf() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - content, err := afero.ReadFile(i.fs, "/etc/resolv.conf") - require.NoError(t, err) - - if diff := cmp.Diff(tt.want, string(content)); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_writeNTPConf(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - oss operatingsystem - role string - ntpServers []*models.V1NTPServer - ntpPath string - want string - wantErr error - }{ - { - name: "configure custom ntp for ubuntu machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/systemd/timesyncd.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/systemd/timesyncd.conf", - oss: osUbuntu, - role: "machine", - ntpServers: []*models.V1NTPServer{{Address: new("custom.1.ntp.org")}, {Address: new("custom.2.ntp.org")}}, - want: `[Time] -NTP=custom.1.ntp.org custom.2.ntp.org -`, - wantErr: nil, - }, - { - name: "use default ntp for ubuntu machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/systemd/timesyncd.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/systemd/timesyncd.conf", - oss: osUbuntu, - role: "machine", - want: "", - wantErr: nil, - }, - { - name: "configure custom ntp for debian machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, 
"/etc/systemd/timesyncd.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/systemd/timesyncd.conf", - oss: osDebian, - role: "machine", - ntpServers: []*models.V1NTPServer{{Address: new("custom.1.ntp.org")}, {Address: new("custom.2.ntp.org")}}, - want: `[Time] -NTP=custom.1.ntp.org custom.2.ntp.org -`, - wantErr: nil, - }, - { - name: "use default ntp for debian machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/systemd/timesyncd.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/systemd/timesyncd.conf", - oss: osDebian, - role: "machine", - want: "", - wantErr: nil, - }, - { - name: "configure ntp for almalinux machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/chrony.conf", []byte(""), 0644)) - }, - oss: osAlmalinux, - ntpPath: "/etc/chrony.conf", - role: "machine", - ntpServers: []*models.V1NTPServer{{Address: new("custom.1.ntp.org")}, {Address: new("custom.2.ntp.org")}}, - want: `# Welcome to the chrony configuration file. See chrony.conf(5) for more -# information about usable directives. - -# In case no custom NTP server is provided -# Cloudflare offers a free public time service that allows us to use their -# anycast network of 180+ locations to synchronize time from their closest server. -# See https://blog.cloudflare.com/secure-time/ -pool custom.1.ntp.org iburst -pool custom.2.ntp.org iburst - -# This directive specify the location of the file containing ID/key pairs for -# NTP authentication. -keyfile /etc/chrony/chrony.keys - -# This directive specify the file into which chronyd will store the rate -# information. -driftfile /var/lib/chrony/chrony.drift - -# Uncomment the following line to turn logging on. -#log tracking measurements statistics - -# Log files location. -logdir /var/log/chrony - -# Stop bad estimates upsetting machine clock. -maxupdateskew 100.0 - -# This directive enables kernel synchronisation (every 11 minutes) of the -# real-time clock. 
Note that it can’t be used along with the 'rtcfile' directive. -rtcsync - -# Step the system clock instead of slewing it if the adjustment is larger than -# one second, but only in the first three clock updates. -makestep 1 3`, - wantErr: nil, - }, - { - name: "use default ntp for almalinux machine", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/chrony.conf", []byte(""), 0644)) - }, - oss: osAlmalinux, - ntpPath: "/etc/chrony.conf", - role: "machine", - want: "", - wantErr: nil, - }, - { - name: "configure custom ntp for firewall", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/chrony/chrony.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/chrony/chrony.conf", - role: "firewall", - ntpServers: []*models.V1NTPServer{{Address: new("custom.1.ntp.org")}, {Address: new("custom.2.ntp.org")}}, - want: `# Welcome to the chrony configuration file. See chrony.conf(5) for more -# information about usable directives. - -# In case no custom NTP server is provided -# Cloudflare offers a free public time service that allows us to use their -# anycast network of 180+ locations to synchronize time from their closest server. -# See https://blog.cloudflare.com/secure-time/ -pool custom.1.ntp.org iburst -pool custom.2.ntp.org iburst - -# This directive specify the location of the file containing ID/key pairs for -# NTP authentication. -keyfile /etc/chrony/chrony.keys - -# This directive specify the file into which chronyd will store the rate -# information. -driftfile /var/lib/chrony/chrony.drift - -# Uncomment the following line to turn logging on. -#log tracking measurements statistics - -# Log files location. -logdir /var/log/chrony - -# Stop bad estimates upsetting machine clock. -maxupdateskew 100.0 - -# This directive enables kernel synchronisation (every 11 minutes) of the -# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. 
-rtcsync - -# Step the system clock instead of slewing it if the adjustment is larger than -# one second, but only in the first three clock updates. -makestep 1 3`, - wantErr: nil, - }, - { - name: "use default ntp for firewall", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/chrony/chrony.conf", []byte(""), 0644)) - }, - ntpPath: "/etc/chrony/chrony.conf", - role: "firewall", - want: "", - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - i := &installer{ - log: slog.Default(), - fs: afero.NewMemMapFs(), - config: &v1.InstallerConfig{Role: tt.role, NTPServers: tt.ntpServers}, - oss: tt.oss, - } - - if tt.fsMocks != nil { - tt.fsMocks(i.fs) - } - - err := i.writeNTPConf() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - content, err := afero.ReadFile(i.fs, tt.ntpPath) - require.NoError(t, err) - - if diff := cmp.Diff(tt.want, string(content)); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_fixPermissions(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - wantErr error - }{ - { - name: "fix permissions", - fsMocks: func(fs afero.Fs) { - require.NoError(t, fs.MkdirAll("/var/tmp", 0000)) - require.NoError(t, afero.WriteFile(fs, "/etc/hosts", []byte("127.0.0.1"), 0000)) - }, - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - i := &installer{ - log: slog.Default(), - fs: afero.NewMemMapFs(), - } - - if tt.fsMocks != nil { - tt.fsMocks(i.fs) - } - - err := i.fixPermissions() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - info, err := i.fs.Stat("/var/tmp") - require.NoError(t, err) - assert.Equal(t, fs.FileMode(01777).Perm(), info.Mode().Perm()) - - info, err = i.fs.Stat("/etc/hosts") - require.NoError(t, 
err) - assert.Equal(t, fs.FileMode(0644).Perm(), info.Mode().Perm()) - }) - } -} - -func Test_installer_findMDUUID(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - execMocks []fakeexecparams - want string - wantFound bool - }{ - { - name: "has mdadm", - execMocks: []fakeexecparams{ - { - WantCmd: []string{"blkid"}, - Output: sampleBlkidOutput, - ExitCode: 0, - }, - { - WantCmd: []string{"mdadm", "--detail", "--export", "/dev/md1"}, - Output: sampleMdadmDetailOutput, - ExitCode: 0, - }, - }, - want: "543eb7f8:98d4d986:e669824d:bebe69e5", - wantFound: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - log := slog.Default() - - i := &installer{ - log: log, - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - fs: fs, - config: &v1.InstallerConfig{RaidEnabled: true, RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9"}, - } - - uuid, found := i.findMDUUID() - assert.Equal(t, tt.wantFound, found) - if diff := cmp.Diff(tt.want, uuid); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_buildCMDLine(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - execMocks []fakeexecparams - want string - }{ - { - name: "without raid", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/install.yaml", []byte(sampleInstallYAML), 0700)) - }, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"blkid"}, - Output: sampleBlkidOutput, - ExitCode: 0, - }, - { - WantCmd: []string{"mdadm", "--detail", "--export", "/dev/md1"}, - Output: sampleMdadmDetailOutput, - ExitCode: 0, - }, - }, - // CMDLINE="console=${CONSOLE} root=UUID=${ROOT_UUID} init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" - want: "console=ttyS1,115200n8 root=UUID=543eb7f8-98d4-d986-e669-824dbebe69e5 init=/sbin/init net.ifnames=0 biosdevname=0 
nvme_core.io_timeout=300", - }, - { - name: "with raid", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/install.yaml", []byte(sampleInstallWithRaidYAML), 0700)) - }, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"blkid"}, - Output: sampleBlkidOutput, - ExitCode: 0, - }, - { - WantCmd: []string{"mdadm", "--detail", "--export", "/dev/md1"}, - Output: sampleMdadmDetailOutput, - ExitCode: 0, - }, - }, - // CMDLINE="console=${CONSOLE} root=UUID=${ROOT_UUID} init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" - want: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300 rdloaddriver=raid0 rdloaddriver=raid1 rd.md.uuid=543eb7f8:98d4d986:e669824d:bebe69e5", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - log := slog.Default() - - i := &installer{ - log: log, - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - fs: fs, - config: mustParseInstallYAML(t, fs), - } - - got := i.buildCMDLine() - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_unsetMachineID(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - wantErr error - }{ - { - name: "unset", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/machine-id", []byte("uuid"), 0700)) - require.NoError(t, afero.WriteFile(fs, "/var/lib/dbus/machine-id", []byte("uuid"), 0700)) - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - i := &installer{ - log: slog.Default(), - fs: fs, - } - - err := i.unsetMachineID() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got 
-want):\n %s", diff) - } - - content, err := afero.ReadFile(i.fs, "/etc/machine-id") - require.NoError(t, err) - assert.Empty(t, content) - - content, err = afero.ReadFile(i.fs, "/var/lib/dbus/machine-id") - require.NoError(t, err) - assert.Empty(t, content) - }) - } -} - -func Test_installer_writeBootInfo(t *testing.T) { - tests := []struct { - name string - cmdline string - fsMocks func(fs afero.Fs) - oss operatingsystem - want *v1.Bootinfo - wantErr error - }{ - { - name: "boot-info ubuntu", - cmdline: "a-cmd-line", - oss: osUbuntu, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/boot/System.map-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/vmlinuz-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/initrd.img-1.2.3", nil, 0700)) - }, - want: &v1.Bootinfo{ - Initrd: "/boot/initrd.img-1.2.3", - Cmdline: "a-cmd-line", - Kernel: "/boot/vmlinuz-1.2.3", - BootloaderID: "metal-ubuntu", - }, - }, - { - name: "more than one system.map present", - cmdline: "a-cmd-line", - oss: osUbuntu, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/boot/System.map-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/System.map-1.2.4", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/vmlinuz-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/initrd.img-1.2.3", nil, 0700)) - }, - want: nil, - wantErr: fmt.Errorf("more or less than a single System.map found([/boot/System.map-1.2.3 /boot/System.map-1.2.4]), probably no kernel or more than one kernel installed"), - }, - { - name: "no system.map present", - cmdline: "a-cmd-line", - oss: osUbuntu, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/boot/vmlinuz-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/initrd.img-1.2.3", nil, 0700)) - }, - want: nil, - wantErr: fmt.Errorf("more or less than a single System.map found([]), probably no kernel or more than one kernel 
installed"), - }, - { - name: "no vmlinuz present", - cmdline: "a-cmd-line", - oss: osUbuntu, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/boot/System.map-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/initrd.img-1.2.3", nil, 0700)) - }, - want: nil, - wantErr: fmt.Errorf("kernel image \"/boot/vmlinuz-1.2.3\" not found"), - }, - { - name: "no ramdisk present", - cmdline: "a-cmd-line", - oss: osUbuntu, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/boot/System.map-1.2.3", nil, 0700)) - require.NoError(t, afero.WriteFile(fs, "/boot/vmlinuz-1.2.3", nil, 0700)) - }, - want: nil, - wantErr: fmt.Errorf("ramdisk \"/boot/initrd.img-1.2.3\" not found"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - i := &installer{ - log: slog.Default(), - fs: fs, - oss: tt.oss, - } - - err := i.writeBootInfo(tt.cmdline) - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - if tt.want != nil { - content, err := afero.ReadFile(i.fs, "/etc/metal/boot-info.yaml") - require.NoError(t, err) - - var bootInfo v1.Bootinfo - err = yaml.Unmarshal(content, &bootInfo) - require.NoError(t, err) - - if diff := cmp.Diff(tt.want, &bootInfo); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - } - }) - } -} - -func Test_installer_processUserdata(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - execMocks []fakeexecparams - oss operatingsystem - wantErr error - }{ - { - name: "no userdata given", - }, - { - name: "cloud-init", - oss: osDebian, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/userdata", []byte(sampleCloudInit), 0700)) - }, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"cloud-init", "devel", "schema", "--config-file", "/etc/metal/userdata"}, - 
Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"systemctl", "preset-all"}, - Output: "", - ExitCode: 0, - }, - }, - }, - { - name: "ignition", - oss: osDebian, - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/userdata", []byte(sampleIgnition), 0700)) - }, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"ignition", "-oem", "file", "-stage", "files", "-log-to-stdout"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"systemctl", "preset-all"}, - Output: "", - ExitCode: 0, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - log := slog.Default() - - i := &installer{ - log: log, - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - fs: fs, - oss: tt.oss, - } - - err := i.processUserdata() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_grubInstall(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - cmdline string - execMocks []fakeexecparams - oss operatingsystem - wantGrubCfg string - wantErr error - }{ - { - name: "without raid debian/ubuntu", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/install.yaml", []byte(sampleInstallYAML), 0700)) - }, - cmdline: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", - oss: osUbuntu, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-ubuntu", "--removable"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"update-grub2"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, - Output: "", - ExitCode: 
0, - }, - }, - wantGrubCfg: `GRUB_DEFAULT=0 -GRUB_TIMEOUT=5 -GRUB_DISTRIBUTOR=metal-ubuntu -GRUB_CMDLINE_LINUX_DEFAULT="" -GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" -GRUB_TERMINAL=serial -GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" -`, - }, - { - name: "with raid debian/ubuntu", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/metal/install.yaml", []byte(sampleInstallWithRaidYAML), 0700)) - }, - cmdline: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", - oss: osUbuntu, - execMocks: []fakeexecparams{ - { - WantCmd: []string{"mdadm", "--examine", "--scan"}, - Output: sampleMdadmScanOutput, - ExitCode: 0, - }, - { - WantCmd: []string{"update-initramfs", "-u"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"blkid"}, - Output: sampleBlkidOutput, - ExitCode: 0, - }, - { - WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sda1", "-p1", "-l", "\\\\EFI\\\\metal-ubuntu\\\\grubx64.efi", "-L", "metal-ubuntu"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sdb1", "-p1", "-l", "\\\\EFI\\\\metal-ubuntu\\\\grubx64.efi", "-L", "metal-ubuntu"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-ubuntu", "--no-nvram", "--removable"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"update-grub2"}, - Output: "", - ExitCode: 0, - }, - { - WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, - Output: "", - ExitCode: 0, - }, - }, - wantGrubCfg: `GRUB_DEFAULT=0 -GRUB_TIMEOUT=5 -GRUB_DISTRIBUTOR=metal-ubuntu -GRUB_CMDLINE_LINUX_DEFAULT="" -GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 
init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" -GRUB_TERMINAL=serial -GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - log := slog.Default() - - i := &installer{ - log: log, - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - fs: fs, - oss: tt.oss, - config: mustParseInstallYAML(t, fs), - } - - err := i.grubInstall(tt.cmdline) - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - content, err := afero.ReadFile(i.fs, "/etc/default/grub") - require.NoError(t, err) - - if diff := cmp.Diff(tt.wantGrubCfg, string(content)); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} - -func Test_installer_writeBuildMeta(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - execMocks []fakeexecparams - want string - wantErr error - }{ - { - name: "build meta gets written", - execMocks: []fakeexecparams{ - { - WantCmd: []string{"ignition", "-version"}, - Output: "Ignition v0.36.2", - ExitCode: 0, - }, - }, - want: `--- -buildVersion: "456" -buildDate: "" -buildSHA: abc -buildRevision: revision -ignitionVersion: Ignition v0.36.2 -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - log := slog.Default() - - i := &installer{ - log: slog.Default(), - fs: fs, - exec: &cmdexec{ - log: log, - c: fakeCmd(t, tt.execMocks...), - }, - } - - v.Version = "456" - v.GitSHA1 = "abc" - v.Revision = "revision" - - err := i.writeBuildMeta() - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - - content, err := afero.ReadFile(i.fs, "/etc/metal/build-meta.yaml") - 
require.NoError(t, err) - assert.Equal(t, tt.want, string(content)) - }) - } -} - -func errorStringComparer() cmp.Option { - return cmp.Comparer(func(x, y error) bool { - if x == nil && y == nil { - return true - } - if x == nil && y != nil { - return false - } - if x != nil && y == nil { - return false - } - return x.Error() == y.Error() - }) -} diff --git a/main.go b/main.go index 7fad8a8..240cc9c 100644 --- a/main.go +++ b/main.go @@ -1,67 +1,25 @@ package main import ( + "context" "log/slog" "os" - "os/exec" - "time" - v1 "github.com/metal-stack/os-installer/api/v1" - "github.com/metal-stack/v" - "github.com/spf13/afero" - "gopkg.in/yaml.v3" + "github.com/metal-stack/os-installer/pkg/installer" ) func main() { - start := time.Now() - jsonHandler := slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{}) - log := slog.New(jsonHandler) + log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})) - log.Info("running install", "version", v.V.String()) - - fs := afero.OsFs{} - - oss, err := detectOS(fs) - if err != nil { - log.Error("installation failed", "error", err) - os.Exit(1) - } - - config, err := parseInstallYAML(fs) + details, allocation, err := installer.ReadConfigurations() if err != nil { - log.Error("installation failed", "error", err) - os.Exit(1) + log.Error("unable to read configuration", "error", err) } - i := installer{ - log: log.WithGroup("os-installer"), - fs: fs, - oss: oss, - config: config, - exec: &cmdexec{ - log: log.WithGroup("cmdexec"), - c: exec.CommandContext, - }, - } + i := installer.New(log, details, allocation) - err = i.do() - if err != nil { - i.log.Error("installation failed", "error", err, "duration", time.Since(start).String()) + if err := i.Install(context.Background()); err != nil { + log.Error("error while running machine installer", "error", err) os.Exit(1) } - - i.log.Info("installation succeeded", "duration", time.Since(start).String()) -} - -func parseInstallYAML(fs afero.Fs) 
(*v1.InstallerConfig, error) { - var config v1.InstallerConfig - content, err := afero.ReadFile(fs, installYAML) - if err != nil { - return nil, err - } - err = yaml.Unmarshal(content, &config) - if err != nil { - return nil, err - } - return &config, nil } diff --git a/os.go b/os.go deleted file mode 100644 index 45d35bf..0000000 --- a/os.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "fmt" - "strconv" - "strings" - - "github.com/spf13/afero" -) - -type operatingsystem string - -const ( - osUbuntu = operatingsystem("ubuntu") - osDebian = operatingsystem("debian") - osAlmalinux = operatingsystem("almalinux") -) - -func (o operatingsystem) BootloaderID() string { - switch o { - case osAlmalinux: - return string(o) - case osDebian, osUbuntu: - return fmt.Sprintf("metal-%s", o) - default: - return fmt.Sprintf("metal-%s", o) - } -} - -func (o operatingsystem) SudoGroup() string { - switch o { - case osAlmalinux: - return "wheel" - case osDebian, osUbuntu: - return "sudo" - default: - return "sudo" - } -} - -func (o operatingsystem) Initramdisk(kernversion string) string { - switch o { - case osAlmalinux: - return fmt.Sprintf("initramfs-%s.img", kernversion) - case osDebian, osUbuntu: - return fmt.Sprintf("initrd.img-%s", kernversion) - default: - return fmt.Sprintf("initrd.img-%s", kernversion) - } -} -func (o operatingsystem) NeedUpdateInitRamfs() bool { - switch o { - case osAlmalinux: - return false - case osDebian, osUbuntu: - return true - default: - return true - } -} - -func (o operatingsystem) GrubInstallCmd() string { - switch o { - case osAlmalinux: - return "" // no execution required - case osDebian, osUbuntu: - return "grub-install" - default: - return "grub-install" - } -} - -func operatingSystemFromString(s string) (operatingsystem, error) { - unquoted, err := strconv.Unquote(s) - if err == nil { - s = unquoted - } - - switch operatingsystem(strings.ToLower(s)) { - case osUbuntu: - return osUbuntu, nil - case osDebian: - return osDebian, 
nil - case osAlmalinux: - return osAlmalinux, nil - default: - return operatingsystem(""), fmt.Errorf("unsupported operating system: %s", s) - } -} - -func detectOS(fs afero.Fs) (operatingsystem, error) { - content, err := afero.ReadFile(fs, "/etc/os-release") - if err != nil { - return operatingsystem(""), err - } - - env := map[string]string{} - for line := range strings.SplitSeq(string(content), "\n") { - k, v, found := strings.Cut(line, "=") - if found { - env[k] = v - } - } - - if os, ok := env["ID"]; ok { - oss, err := operatingSystemFromString(os) - if err != nil { - return operatingsystem(""), err - } - return oss, nil - } - - return operatingsystem(""), fmt.Errorf("unable to detect OS") -} diff --git a/os_test.go b/os_test.go deleted file mode 100644 index feb9f11..0000000 --- a/os_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package main - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/spf13/afero" - "github.com/stretchr/testify/require" -) - -func Test_detectOS(t *testing.T) { - tests := []struct { - name string - fsMocks func(fs afero.Fs) - want operatingsystem - wantErr error - }{ - { - name: "ubuntu 22.04 os", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/os-release", []byte(`PRETTY_NAME="Ubuntu 22.04.1 LTS" -NAME="Ubuntu" -VERSION_ID="22.04" -VERSION="22.04.1 LTS (Jammy Jellyfish)" -VERSION_CODENAME=jammy -ID=ubuntu -ID_LIKE=debian -HOME_URL="https://www.ubuntu.com/" -SUPPORT_URL="https://help.ubuntu.com/" -BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" -PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" -UBUNTU_CODENAME=jammy`), 0755)) - }, - want: osUbuntu, - wantErr: nil, - }, - { - name: "almalinux 9", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/os-release", []byte(`NAME="AlmaLinux" -VERSION="9.4 (Seafoam Ocelot)" -ID="almalinux" -ID_LIKE="rhel centos fedora" -VERSION_ID="9.4" -PLATFORM_ID="platform:el9" 
-PRETTY_NAME="AlmaLinux 9.4 (Seafoam Ocelot)" -ANSI_COLOR="0;34" -LOGO="fedora-logo-icon" -CPE_NAME="cpe:/o:almalinux:almalinux:9::baseos" -HOME_URL="https://almalinux.org/" -DOCUMENTATION_URL="https://wiki.almalinux.org/" -BUG_REPORT_URL="https://bugs.almalinux.org/" - -ALMALINUX_MANTISBT_PROJECT="AlmaLinux-9" -ALMALINUX_MANTISBT_PROJECT_VERSION="9.4" -REDHAT_SUPPORT_PRODUCT="AlmaLinux" -REDHAT_SUPPORT_PRODUCT_VERSION="9.4" -SUPPORT_END=2032-06-01 -`), 0755)) - }, - want: osAlmalinux, - wantErr: nil, - }, - { - name: "debian 10", - fsMocks: func(fs afero.Fs) { - require.NoError(t, afero.WriteFile(fs, "/etc/os-release", []byte(`PRETTY_NAME="Debian GNU/Linux 10 (buster)" -NAME="Debian GNU/Linux" -VERSION_ID="10" -VERSION="10 (buster)" -VERSION_CODENAME=buster -ID=debian -HOME_URL="https://www.debian.org/" -SUPPORT_URL="https://www.debian.org/support" -BUG_REPORT_URL="https://bugs.debian.org/"`), 0755)) - }, - want: osDebian, - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - - if tt.fsMocks != nil { - tt.fsMocks(fs) - } - - oss, err := detectOS(fs) - if diff := cmp.Diff(tt.wantErr, err, errorStringComparer()); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - if diff := cmp.Diff(tt.want, oss); diff != "" { - t.Errorf("error diff (+got -want):\n %s", diff) - } - }) - } -} diff --git a/cmdexec.go b/pkg/exec/cmdexec.go similarity index 51% rename from cmdexec.go rename to pkg/exec/cmdexec.go index 5eba4cf..50e4a50 100644 --- a/cmdexec.go +++ b/pkg/exec/cmdexec.go @@ -1,4 +1,4 @@ -package main +package exec import ( "context" @@ -10,46 +10,53 @@ import ( "time" ) -type cmdexec struct { +type CmdExecutor struct { log *slog.Logger c func(ctx context.Context, name string, arg ...string) *exec.Cmd } -type cmdParams struct { - name string - args []string - dir string - timeout time.Duration - combined bool - stdin string - env []string +type Params struct { + Name string + Args []string 
+ Dir string + Timeout time.Duration + Combined bool + Stdin string + Env []string } -func (i *cmdexec) command(p *cmdParams) (out string, err error) { +func New(log *slog.Logger) *CmdExecutor { + return &CmdExecutor{ + log: log, + c: exec.CommandContext, + } +} + +func (i *CmdExecutor) WithCommandFn(c func(ctx context.Context, name string, arg ...string) *exec.Cmd) *CmdExecutor { + i.c = c + return i +} + +func (i *CmdExecutor) Execute(ctx context.Context, p *Params) (out string, err error) { var ( start = time.Now() output []byte ) - i.log.Info("running command", "command", strings.Join(append([]string{p.name}, p.args...), " "), "start", start.String()) + i.log.Debug("running command", "command", strings.Join(append([]string{p.Name}, p.Args...), " "), "start", start.String()) - ctx := context.Background() - if p.timeout != 0 { + if p.Timeout != 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, p.timeout) + ctx, cancel = context.WithTimeout(ctx, p.Timeout) defer cancel() } - cmd := i.c(ctx, p.name, p.args...) - if p.dir != "" { - cmd.Dir = "/etc/metal" - } - - cmd.Env = append(cmd.Env, p.env...) + cmd := i.c(ctx, p.Name, p.Args...) + cmd.Env = append(cmd.Env, p.Env...) // show stderr cmd.Stderr = os.Stderr - if p.stdin != "" { + if p.Stdin != "" { stdin, err := cmd.StdinPipe() if err != nil { return "", err @@ -59,14 +66,14 @@ func (i *cmdexec) command(p *cmdParams) (out string, err error) { defer func() { _ = stdin.Close() }() - _, err = io.WriteString(stdin, p.stdin) + _, err = io.WriteString(stdin, p.Stdin) if err != nil { i.log.Error("error when writing to command's stdin", "error", err) } }() } - if p.combined { + if p.Combined { output, err = cmd.CombinedOutput() } else { output, err = cmd.Output() diff --git a/pkg/exec/doc.go b/pkg/exec/doc.go deleted file mode 100644 index 67e089d..0000000 --- a/pkg/exec/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package exec groups functionality related for command execution. 
-*/ -package exec diff --git a/pkg/exec/verbosecmd.go b/pkg/exec/verbosecmd.go deleted file mode 100644 index 662eda4..0000000 --- a/pkg/exec/verbosecmd.go +++ /dev/null @@ -1,32 +0,0 @@ -package exec - -import ( - "bytes" - "fmt" - "os/exec" -) - -// VerboseCmd represents a system command with verbose output to be able to get an idea of the issue in case the cmd -// fails. -type VerboseCmd struct { - Cmd exec.Cmd -} - -// NewVerboseCmd creates a new instance of VerboseCmd. -func NewVerboseCmd(name string, args ...string) VerboseCmd { - cmd := exec.Command(name, args...) - return VerboseCmd{*cmd} -} - -//Run executes the command and returns any errors in case exist. -func (v VerboseCmd) Run() error { - var stderr bytes.Buffer - v.Cmd.Stderr = &stderr - - err := v.Cmd.Run() - if err != nil { - return fmt.Errorf("%w: %s", err, stderr.String()) - } - - return nil -} diff --git a/pkg/network/tpl/frr.firewall.tpl b/pkg/frr/frr.firewall.tpl similarity index 95% rename from pkg/network/tpl/frr.firewall.tpl rename to pkg/frr/frr.firewall.tpl index 00a45da..7f7080b 100644 --- a/pkg/network/tpl/frr.firewall.tpl +++ b/pkg/frr/frr.firewall.tpl @@ -1,7 +1,6 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.FirewallFRRData*/ -}} {{- $ASN := .ASN -}} {{- $RouterId := .RouterID -}} -{{ .Comment }} +# {{ .Comment }} frr version {{ .FRRVersion }} frr defaults datacenter hostname {{ .Hostname }} @@ -103,4 +102,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! 
diff --git a/pkg/frr/frr.go b/pkg/frr/frr.go new file mode 100644 index 0000000..c013a09 --- /dev/null +++ b/pkg/frr/frr.go @@ -0,0 +1,246 @@ +package frr + +import ( + "context" + "fmt" + "log/slog" + "net/netip" + "os/exec" + + "github.com/Masterminds/semver/v3" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + comment = "generated by os-installer" + + serviceName = "frr.service" + + frrConfigPath = "/etc/frr/frr.conf" + + // defaultFrrVersion holds a string that is used in the frr.conf to define the FRR version. + defaultFrrVersion = "8.5" + // ipPrefixListSeqSeed specifies the initial value for prefix lists sequence number. + ipPrefixListSeqSeed = 100 + // ipPrefixListNoExportSuffix defines the suffix to use for private IP ranges that must not be exported. + ipPrefixListNoExportSuffix = "-no-export" + // routeMapOrderSeed defines the initial value for route-map order. + routeMapOrderSeed = 10 +) + +var ( + //go:embed frr.firewall.tpl + firewallTemplateString string + //go:embed frr.machine.tpl + machineTemplateString string +) + +type ( + Config struct { + Log *slog.Logger + Reload bool + Validate bool + + Network *network.Network + + FRRVersion *semver.Version + + fs afero.Fs + } + + // frrData contains attributes to hold FRR configuration of all kind of bare metal servers. + frrData struct { + ASN int64 + Comment string + FRRVersion string + Hostname string + RouterID string + VRFs []vrf + } + + // vrf represents data required to render vrf information into frr.conf. 
+ vrf struct { + Comment string + ID uint64 + Table uint64 + VNI uint64 + ImportVRFNames []string + IPPrefixLists []ipPrefixList + RouteMaps []routeMap + FRRVersion *frrVersion + } + + // ipPrefixList represents 'ip prefix-list' filtering mechanism to be used in combination with route-maps. + ipPrefixList struct { + Name string + Spec string + AddressFamily string + // SourceVRF specifies from which VRF the given prefix list should be imported + SourceVRF string + } + + // routeMap represents a route-map to permit or deny routes. + routeMap struct { + Name string + Entries []string + Policy accessPolicy + Order int + } + + frrVersion struct { + Major uint64 + Minor uint64 + } +) + +// Renders renders frr configuration according to the given input data and reloads the service if necessary +func Render(ctx context.Context, cfg *Config) (changed bool, err error) { + cfg.Log.Debug("render frr configuration") + var ( + data any + template string + ) + + if cfg.Network.IsMachine() { + net, err := cfg.Network.PrivatePrimaryNetwork() + if err != nil { + return false, err + } + data = frrData{ + FRRVersion: defaultFrrVersion, + Hostname: cfg.Network.Hostname(), + Comment: comment, + ASN: int64(net.Asn), + RouterID: routerID(net), + } + template = machineTemplateString + } else { + net, err := cfg.Network.UnderlayNetwork() + if err != nil { + return false, err + } + vrfs, err := assembleVRFs(cfg) + if err != nil { + return false, err + } + + data = frrData{ + FRRVersion: defaultFrrVersion, + Hostname: cfg.Network.Hostname(), + Comment: comment, + ASN: int64(net.Asn), + RouterID: routerID(net), + VRFs: vrfs, + } + template = firewallTemplateString + } + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: template, + Data: data, + Fs: cfg.fs, + Validate: func(path string) error { + if !cfg.Validate { + return nil + } + + return validate(path) + }, + }) + if err != nil { + return false, err + } + + changed, err = r.Render(ctx, frrConfigPath) + if err != 
nil { + return changed, err + } + + if cfg.Reload && changed { + if err := systemd_renderer.Reload(ctx, cfg.Log, serviceName); err != nil { + return changed, err + } + } + + return +} + +// routerID will calculate the bgp router-id which must only be specified in the ipv6 range. +// returns 0.0.0.0 for erroneous ip addresses and 169.254.255.255 for ipv6 +// TODO prepare machine allocations with ipv6 primary address and tests +func routerID(net *apiv2.MachineNetwork) string { + if len(net.Ips) < 1 { + return "0.0.0.0" + } + ip, err := netip.ParseAddr(net.Ips[0]) + if err != nil { + return "0.0.0.0" + } + if ip.Is4() { + return ip.String() + } + return "169.254.255.255" +} + +// Validate can be used to run validation on FRR configuration using vtysh. +func validate(frrConfigPath string) error { + vtysh := fmt.Sprintf("vtysh --dryrun --inputfile %s", frrConfigPath) + cmd := exec.Command("bash", "-c", vtysh, frrConfigPath) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("vtysh validation failed, output:%s error %w", string(out), err) + } + return nil +} + +func assembleVRFs(cfg *Config) ([]vrf, error) { + var ( + result []vrf + frr *frrVersion + ) + + if cfg.FRRVersion == nil { + frrVersion, err := DetectVersion(cfg.Log) + if err != nil { + return nil, fmt.Errorf("unable to detect frr version: %w", err) + } + + cfg.FRRVersion = frrVersion + } + + frr = &frrVersion{ + Major: cfg.FRRVersion.Major(), + Minor: cfg.FRRVersion.Minor(), + } + + for _, n := range cfg.Network.AllocationNetworks() { + switch n.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, apiv2.NetworkType_NETWORK_TYPE_SUPER, apiv2.NetworkType_NETWORK_TYPE_SUPER_NAMESPACED: + continue + } + + i, err := importRulesForNetwork(cfg, n) + if err != nil { + return nil, err + } + + vrf := vrf{ + ID: n.Vrf, + VNI: n.Vrf, + ImportVRFNames: i.ImportVRFs, + IPPrefixLists: i.prefixLists(), + RouteMaps: i.routeMaps(), + FRRVersion: frr, + } + result = append(result, vrf) + } + + 
return result, nil +} diff --git a/pkg/network/tpl/frr.machine.tpl b/pkg/frr/frr.machine.tpl similarity index 92% rename from pkg/network/tpl/frr.machine.tpl rename to pkg/frr/frr.machine.tpl index df8c05e..263dc38 100644 --- a/pkg/network/tpl/frr.machine.tpl +++ b/pkg/frr/frr.machine.tpl @@ -1,7 +1,6 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.FirewallFRRData*/ -}} {{- $ASN := .ASN -}} {{- $RouterId := .RouterID -}} -{{ .Comment }} +# {{ .Comment }} frr version {{ .FRRVersion }} frr defaults datacenter hostname {{ .Hostname }} @@ -59,4 +58,4 @@ route-map only-self-out permit 10 match as-path SELF ! route-map only-self-out deny 99 -! \ No newline at end of file +! diff --git a/pkg/frr/frr_test.go b/pkg/frr/frr_test.go new file mode 100644 index 0000000..2ebecdf --- /dev/null +++ b/pkg/frr/frr_test.go @@ -0,0 +1,450 @@ +package frr + +import ( + "embed" + "log/slog" + "path" + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + //go:embed test + expectedFrrFiles embed.FS + + firewallAllocation = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, 
+ }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } + + firewallAllocationDualStack = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"2002::/64"}, + Ips: []string{"2002::1"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2a02:c00:20::1", "185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "2a02:c00:20::/45"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + 
NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } + + firewallFrr9Allocation = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } + + firewallFrr10Allocation = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: 
new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } + + firewallSharedAllocation = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "dd429d45-db03-4627-887f-bf7761d376a5", + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Project: new("dd429d45-db03-4627-887f-bf7761d376a5"), + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: 
apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + }, + } + + firewallIPv6Allocation = &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"2002::/64"}, + Ips: []string{"2002::1"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2a02:c00:20::1"}, + Prefixes: []string{"2a02:c00:20::/45"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } + + machineAllocation = &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-a", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.17.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + } +) + +func TestRender(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + frrVersion *semver.Version + wantFilePath string + wantErr error + }{ + { + name: "render firewall", + allocation: firewallAllocation, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.firewall", + wantErr: nil, + }, + { + name: "render firewall, dualstack", + allocation: firewallAllocationDualStack, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.firewall_dualstack", + wantErr: nil, + }, + { + name: "render firewall frr-9", + allocation: firewallFrr9Allocation, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.firewall_frr-9", + wantErr: nil, + }, + { + name: "render firewall frr-10", + allocation: firewallFrr10Allocation, + frrVersion: semver.MustParse("10.4.1"), + wantFilePath: "frr.conf.firewall_frr-10", + wantErr: nil, + }, + { + name: "render firewall shared", + allocation: firewallSharedAllocation, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.firewall_shared", + wantErr: nil, + }, + { + name: "render firewall ipv6", + allocation: firewallIPv6Allocation, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.firewall_ipv6", 
+ wantErr: nil, + }, + { + name: "render machine", + allocation: machineAllocation, + frrVersion: semver.MustParse("9.0.1"), + wantFilePath: "frr.conf.machine", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + _, gotErr := Render(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + FRRVersion: tt.frrVersion, + Validate: false, + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(frrConfigPath) + require.NoError(t, err) + + assert.Equal(t, mustReadExpected(tt.wantFilePath), string(content)) + }) + } +} + +func mustReadExpected(name string) string { + tpl, err := expectedFrrFiles.ReadFile(path.Join("test", name)) + if err != nil { + panic(err) + } + + return string(tpl) +} diff --git a/pkg/frr/frr_version.go b/pkg/frr/frr_version.go new file mode 100644 index 0000000..9a50f47 --- /dev/null +++ b/pkg/frr/frr_version.go @@ -0,0 +1,70 @@ +package frr + +import ( + "fmt" + "log/slog" + "os/exec" + "strings" + + "github.com/Masterminds/semver/v3" +) + +func DetectVersion(log *slog.Logger) (*semver.Version, error) { + vtysh, err := exec.LookPath("vtysh") + if err != nil { + return nil, fmt.Errorf("unable to detect path to vtysh: %w", err) + } + + // $ vtysh -c "show version"|grep FRRouting + // FRRouting 10.2.1 (shoot--pz9cjf--mwen-fel-firewall-dcedd) on Linux(6.6.60-060660-generic). + + // $ vtysh -h + // Usage : vtysh [OPTION...] + // Integrated shell for FRR (version 10.4.3). + + // Usage : vtysh [OPTION...] + // Integrated shell for FRR (version 8.4.4). 
+ + c := exec.Command(vtysh, "-h") + out, err := c.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to detect frr version with vtysh output:%s error: %w", string(out), err) + } + + return parseVersion(log, string(out)) +} + +func parseVersion(log *slog.Logger, vtyshOutput string) (*semver.Version, error) { + var frrVersion string + + log.Debug("parseVersion", "vtysh output", vtyshOutput) + for line := range strings.SplitSeq(vtyshOutput, "\n") { + if !strings.Contains(line, "Integrated shell for FRR") { + continue + } + + _, dirtyVersion, found := strings.Cut(line, "(version ") + if !found { + continue + } + + version, _, found := strings.Cut(dirtyVersion, ").") + if !found { + continue + } + + frrVersion = version + break + } + + if frrVersion == "" { + return nil, fmt.Errorf("unable to detect frr version") + } + + ver, err := semver.NewVersion(frrVersion) + if err != nil { + return nil, fmt.Errorf("unable to parse frr version to semver: %w", err) + } + + return ver, nil +} diff --git a/pkg/frr/frr_version_test.go b/pkg/frr/frr_version_test.go new file mode 100644 index 0000000..4c0e301 --- /dev/null +++ b/pkg/frr/frr_version_test.go @@ -0,0 +1,95 @@ +package frr + +import ( + "log/slog" + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/stretchr/testify/require" +) + +func TestDetectVersion(t *testing.T) { + tests := []struct { + name string + cmdoutput string + want *semver.Version + wantErr error + }{ + { + name: "frr 10.4", + cmdoutput: ` + vtysh -h + Usage : vtysh [OPTION...] + Integrated shell for FRR (version 10.4.3). + `, + want: semver.MustParse("10.4.3"), + wantErr: nil, + }, + { + name: "frr 8.4", + cmdoutput: ` + Integrated shell for FRR (version 8.4.4). 
+ Configured with: + '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' '--sysconfdir=/etc' '--localstatedir=/var' '--disable-option-checking' '--disable-silent-rules' '--libdir=${prefix}/lib/x86_64-linux-gnu' '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' '--localstatedir=/var/run/frr' '--sbindir=/usr/lib/frr' '--sysconfdir=/etc/frr' '--with-vtysh-pager=/usr/bin/pager' '--libdir=/usr/lib/x86_64-linux-gnu/frr' '--with-moduledir=/usr/lib/x86_64-linux-gnu/frr/modules' '--disable-dependency-tracking' '--enable-rpki' '--disable-scripting' '--disable-pim6d' '--with-libpam' '--enable-doc' '--enable-doc-html' '--enable-snmp' '--enable-fpm' '--disable-protobuf' '--disable-zeromq' '--enable-ospfapi' '--enable-bgp-vnc' '--enable-multipath=256' '--enable-user=frr' '--enable-group=frr' '--enable-vty-group=frrvty' '--enable-configfile-mask=0640' '--enable-logfile-mask=0640' 'build_alias=x86_64-linux-gnu' 'PYTHON=python3' + + -b, --boot Execute boot startup configuration + -c, --command Execute argument as command + -d, --daemon Connect only to the specified daemon + -f, --inputfile Execute commands from specific file and exit + -E, --echo Echo prompt and command in -c mode + -C, --dryrun Check configuration for validity and exit + -m, --markfile Mark input file with context end + --vty_socket Override vty socket path + --config_dir Override config directory path + `, + want: semver.MustParse("8.4.4"), + wantErr: nil, + }, + + { + name: "10.4.1", + cmdoutput: `Usage : vtysh [OPTION...] + +Integrated shell for FRR (version 10.4.1). 
+Configured with: + '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' '--sysconfdir=/etc' '--localstatedir=/var' '--disable-option-checking' '--disable-silent-rules' '--libdir=${prefix}/lib/x86_64-linux-gnu' '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' '--sbindir=/usr/lib/frr' '--with-vtysh-pager=/usr/bin/pager' '--libdir=/usr/lib/x86_64-linux-gnu/frr' '--with-moduledir=/usr/lib/x86_64-linux-gnu/frr/modules' '--disable-dependency-tracking' '--enable-rpki' '--disable-scripting' '--enable-pim6d' '--disable-grpc' '--with-libpam' '--enable-doc' '--enable-doc-html' '--enable-snmp' '--enable-fpm' '--disable-protobuf' '--disable-zeromq' '--enable-ospfapi' '--enable-bgp-vnc' '--enable-cumulus=yes' '--enable-multipath=256' '--enable-pcre2posix' '--enable-user=frr' '--enable-group=frr' '--enable-vty-group=frrvty' '--enable-configfile-mask=0640' '--enable-logfile-mask=0640' 'build_alias=x86_64-linux-gnu' 'PYTHON=python3' + +-b, --boot Execute boot startup configuration +-c, --command Execute argument as command +-d, --daemon Connect only to the specified daemon +-f, --inputfile Execute commands from specific file and exit +-E, --echo Echo prompt and command in -c mode +-C, --dryrun Check configuration for validity and exit +-m, --markfile Mark input file with context end + --vty_socket Override vty socket path + --config_dir Override config directory path +-N --pathspace Insert prefix into config & socket paths +-u --user Run as an unprivileged user +-w, --writeconfig Write integrated config (frr.conf) and exit +-H, --histfile Override history file +-t, --timestamp Print a timestamp before going to shell or reading the configuration + --no-fork Don't fork clients to handle daemons (slower for large configs) + --exec-timeout Set an idle timeout for this vtysh session +-h, --help Display this help and exit + +Note that multiple commands may be executed from the 
command +line by passing multiple -c args, or by embedding linefeed +characters in one or more of the commands.`, + want: semver.MustParse("10.4.1"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseVersion(slog.Default(), tt.cmdoutput) + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + if tt.wantErr != nil { + return + } + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/network/routemap.go b/pkg/frr/routemap.go similarity index 56% rename from pkg/network/routemap.go rename to pkg/frr/routemap.go index 157e6ba..ef2c464 100644 --- a/pkg/network/routemap.go +++ b/pkg/frr/routemap.go @@ -1,79 +1,64 @@ -package network +package frr import ( "fmt" "net/netip" + "slices" "sort" "strings" - "github.com/metal-stack/metal-go/api/models" - mn "github.com/metal-stack/metal-lib/pkg/net" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + nwutil "github.com/metal-stack/os-installer/pkg/network" ) -type importPrefix struct { - Prefix netip.Prefix - Policy AccessPolicy - SourceVRF string -} - -type importRule struct { - TargetVRF string - ImportVRFs []string - ImportPrefixes []importPrefix - ImportPrefixesNoExport []importPrefix -} - -type ImportSettings struct { - ImportPrefixes []importPrefix - ImportPrefixesNoExport []importPrefix -} +const ( + // permit defines an access policy that allows access. + permit accessPolicy = "permit" + // deny defines an access policy that forbids access. + deny accessPolicy = "deny" +) -func (i *importRule) bySourceVrf() map[string]ImportSettings { - r := map[string]ImportSettings{} - for _, vrf := range i.ImportVRFs { - r[vrf] = ImportSettings{} - } +type ( + // accessPolicy is a type that represents a policy to manage access roles. 
+ accessPolicy string - for _, pfx := range i.ImportPrefixes { - e := r[pfx.SourceVRF] - e.ImportPrefixes = append(e.ImportPrefixes, pfx) - r[pfx.SourceVRF] = e + importPrefix struct { + Prefix netip.Prefix + Policy accessPolicy + SourceVRF string } - for _, pfx := range i.ImportPrefixesNoExport { - e := r[pfx.SourceVRF] - e.ImportPrefixesNoExport = append(e.ImportPrefixesNoExport, pfx) - r[pfx.SourceVRF] = e + importRule struct { + TargetVRF string + ImportVRFs []string + ImportPrefixes []importPrefix + ImportPrefixesNoExport []importPrefix } +) - return r -} - -func importRulesForNetwork(kb config, network *models.V1MachineNetwork) *importRule { +func importRulesForNetwork(cfg *Config, network *apiv2.MachineNetwork) (*importRule, error) { vrfName := vrfNameOf(network) - - if network.Networktype == nil || *network.Networktype == mn.Underlay { - return nil - } i := importRule{ TargetVRF: vrfName, } - privatePrimaryNet := kb.getPrivatePrimaryNetwork() + privatePrimaryNet, err := cfg.Network.PrivatePrimaryNetwork() + if err != nil { + return nil, err + } - externalNets := kb.GetNetworks(mn.External) - privateSecondarySharedNets := kb.GetNetworks(mn.PrivateSecondaryShared) + externalNets := cfg.Network.GetNetworks(apiv2.NetworkType_NETWORK_TYPE_EXTERNAL) + privateSecondarySharedNets := cfg.Network.PrivateSecondarySharedNetworks() - nt := *network.Networktype - switch nt { - case mn.PrivatePrimaryUnshared: - fallthrough - case mn.PrivatePrimaryShared: + if network.Network == privatePrimaryNet.Network { // reach out from private network into public networks i.ImportVRFs = vrfNamesOf(externalNets) i.ImportPrefixes = getDestinationPrefixes(externalNets) // deny public address of default network - defaultNet := kb.GetDefaultRouteNetwork() + defaultNet, err := cfg.Network.GetDefaultRouteNetwork() + if err != nil { + return nil, err + } for _, ip := range defaultNet.Ips { if parsed, err := netip.ParseAddr(ip); err == nil { var bl = 32 @@ -82,7 +67,7 @@ func 
importRulesForNetwork(kb config, network *models.V1MachineNetwork) *importR } i.ImportPrefixes = append(i.ImportPrefixes, importPrefix{ Prefix: netip.PrefixFrom(parsed, bl), - Policy: Deny, + Policy: deny, SourceVRF: vrfNameOf(defaultNet), }) } @@ -95,41 +80,46 @@ func importRulesForNetwork(kb config, network *models.V1MachineNetwork) *importR i.ImportVRFs = append(i.ImportVRFs, vrfNamesOf(privateSecondarySharedNets)...) i.ImportPrefixes = append(i.ImportPrefixes, prefixesOfNetworks(privateSecondarySharedNets)...) - // reach out from private network to destination prefixes of private secondays shared networks + // reach out from private network to destination prefixes of private secondary shared networks for _, n := range privateSecondarySharedNets { - for _, pfx := range n.Destinationprefixes { + for _, pfx := range n.DestinationPrefixes { ppfx := netip.MustParsePrefix(pfx) - isThere := false + var exists bool for _, i := range i.ImportPrefixes { if i.Prefix == ppfx { - isThere = true + exists = true } } - if !isThere { + if !exists { i.ImportPrefixes = append(i.ImportPrefixes, importPrefix{ Prefix: ppfx, - Policy: Permit, + Policy: permit, SourceVRF: vrfNameOf(n), }) } } } - case mn.PrivateSecondaryShared: + + return &i, nil + } + + switch network.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED: // reach out from private shared networks into private primary network i.ImportVRFs = []string{vrfNameOf(privatePrimaryNet)} - i.ImportPrefixes = concatPfxSlices(prefixesOfNetwork(privatePrimaryNet, vrfNameOf(privatePrimaryNet)), prefixesOfNetwork(network, vrfNameOf(privatePrimaryNet))) + i.ImportPrefixes = slices.Concat(prefixesOfNetwork(privatePrimaryNet, vrfNameOf(privatePrimaryNet)), prefixesOfNetwork(network, vrfNameOf(privatePrimaryNet))) // import destination prefixes of dmz networks from external networks - if len(network.Destinationprefixes) > 0 { - for _, pfx := range network.Destinationprefixes { + if len(network.DestinationPrefixes) > 0 { + 
for _, pfx := range network.DestinationPrefixes { for _, e := range externalNets { importExternalNet := false - for _, epfx := range e.Destinationprefixes { + for _, epfx := range e.DestinationPrefixes { if pfx == epfx { importExternalNet = true i.ImportPrefixes = append(i.ImportPrefixes, importPrefix{ Prefix: netip.MustParsePrefix(pfx), - Policy: Permit, + Policy: permit, SourceVRF: vrfNameOf(e), }) } @@ -141,16 +131,16 @@ func importRulesForNetwork(kb config, network *models.V1MachineNetwork) *importR } } } - case mn.External: + case apiv2.NetworkType_NETWORK_TYPE_EXTERNAL: // reach out from public into private and other public networks i.ImportVRFs = []string{vrfNameOf(privatePrimaryNet)} i.ImportPrefixes = prefixesOfNetwork(network, vrfNameOf(privatePrimaryNet)) - nets := []*models.V1MachineNetwork{privatePrimaryNet} + nets := []*apiv2.MachineNetwork{privatePrimaryNet} - if containsDefaultRoute(network.Destinationprefixes) { + if nwutil.ContainsDefaultRoute(network.DestinationPrefixes) { for _, r := range privateSecondarySharedNets { - if containsDefaultRoute(r.Destinationprefixes) { + if nwutil.ContainsDefaultRoute(r.DestinationPrefixes) { nets = append(nets, r) i.ImportVRFs = append(i.ImportVRFs, vrfNameOf(r)) } @@ -159,18 +149,21 @@ func importRulesForNetwork(kb config, network *models.V1MachineNetwork) *importR i.ImportPrefixesNoExport = prefixesOfNetworks(nets) } - return &i + return &i, nil } -func (i *importRule) prefixLists() []IPPrefixList { - var result []IPPrefixList - seed := IPPrefixListSeqSeed - afs := []AddressFamily{AddressFamilyIPv4, AddressFamilyIPv6} +func (i *importRule) prefixLists() []ipPrefixList { + var ( + result []ipPrefixList + seed = ipPrefixListSeqSeed + afs = []apiv2.NetworkAddressFamily{apiv2.NetworkAddressFamily_NETWORK_ADDRESS_FAMILY_V4, apiv2.NetworkAddressFamily_NETWORK_ADDRESS_FAMILY_V6} + ) + for _, af := range afs { pfxList := prefixLists(i.ImportPrefixesNoExport, af, false, seed, i.TargetVRF) result = append(result, 
pfxList...) - seed = IPPrefixListSeqSeed + len(result) + seed = ipPrefixListSeqSeed + len(result) result = append(result, prefixLists(i.ImportPrefixes, af, true, seed, i.TargetVRF)...) } @@ -179,18 +172,24 @@ func (i *importRule) prefixLists() []IPPrefixList { func prefixLists( prefixes []importPrefix, - af AddressFamily, + af apiv2.NetworkAddressFamily, isExported bool, seed int, vrf string, -) []IPPrefixList { - var result []IPPrefixList +) []ipPrefixList { + afString := "ip" + if af == apiv2.NetworkAddressFamily_NETWORK_ADDRESS_FAMILY_V6 { + afString = "ipv6" + } + + var result []ipPrefixList + for _, p := range prefixes { - if af == AddressFamilyIPv4 && !p.Prefix.Addr().Is4() { + if af == apiv2.NetworkAddressFamily_NETWORK_ADDRESS_FAMILY_V4 && !p.Prefix.Addr().Is4() { continue } - if af == AddressFamilyIPv6 && !p.Prefix.Addr().Is6() { + if af == apiv2.NetworkAddressFamily_NETWORK_ADDRESS_FAMILY_V6 && !p.Prefix.Addr().Is6() { continue } @@ -201,12 +200,14 @@ func prefixLists( continue } name := p.name(vrf, isExported) - prefixList := IPPrefixList{ + + prefixList := ipPrefixList{ Name: name, Spec: spec, - AddressFamily: af, + AddressFamily: afString, SourceVRF: p.SourceVRF, } + result = append(result, prefixList) } seed++ @@ -214,39 +215,31 @@ func prefixLists( return result } -func concatPfxSlices(pfxSlices ...[]importPrefix) []importPrefix { - res := []importPrefix{} - for _, pfxSlice := range pfxSlices { - res = append(res, pfxSlice...) 
- } - return res -} - -func stringSliceToIPPrefix(s []string, sourceVrf string) []importPrefix { +func convertToImportPrefixes(prefixes []string, sourceVrf string) []importPrefix { var result []importPrefix - for _, e := range s { - ipp, err := netip.ParsePrefix(e) + for _, prefix := range prefixes { + ipp, err := netip.ParsePrefix(prefix) if err != nil { continue } result = append(result, importPrefix{ Prefix: ipp, - Policy: Permit, + Policy: permit, SourceVRF: sourceVrf, }) } return result } -func getDestinationPrefixes(networks []*models.V1MachineNetwork) []importPrefix { +func getDestinationPrefixes(networks []*apiv2.MachineNetwork) []importPrefix { var result []importPrefix for _, network := range networks { - result = append(result, stringSliceToIPPrefix(network.Destinationprefixes, vrfNameOf(network))...) + result = append(result, convertToImportPrefixes(network.DestinationPrefixes, vrfNameOf(network))...) } return result } -func prefixesOfNetworks(networks []*models.V1MachineNetwork) []importPrefix { +func prefixesOfNetworks(networks []*apiv2.MachineNetwork) []importPrefix { var result []importPrefix for _, network := range networks { result = append(result, prefixesOfNetwork(network, vrfNameOf(network))...) 
@@ -254,15 +247,15 @@ func prefixesOfNetworks(networks []*models.V1MachineNetwork) []importPrefix { return result } -func prefixesOfNetwork(network *models.V1MachineNetwork, sourceVrf string) []importPrefix { - return stringSliceToIPPrefix(network.Prefixes, sourceVrf) +func prefixesOfNetwork(network *apiv2.MachineNetwork, sourceVrf string) []importPrefix { + return convertToImportPrefixes(network.Prefixes, sourceVrf) } -func vrfNameOf(n *models.V1MachineNetwork) string { - return fmt.Sprintf("vrf%d", *n.Vrf) +func vrfNameOf(n *apiv2.MachineNetwork) string { + return fmt.Sprintf("vrf%d", n.Vrf) } -func vrfNamesOf(networks []*models.V1MachineNetwork) []string { +func vrfNamesOf(networks []*apiv2.MachineNetwork) []string { var result []string for _, n := range networks { result = append(result, vrfNameOf(n)) @@ -271,8 +264,8 @@ func vrfNamesOf(networks []*models.V1MachineNetwork) []string { return result } -func byName(prefixLists []IPPrefixList) map[string]IPPrefixList { - byName := map[string]IPPrefixList{} +func byName(prefixLists []ipPrefixList) map[string]ipPrefixList { + byName := map[string]ipPrefixList{} for _, prefixList := range prefixLists { if _, isPresent := byName[prefixList.Name]; isPresent { continue @@ -284,10 +277,10 @@ func byName(prefixLists []IPPrefixList) map[string]IPPrefixList { return byName } -func (i *importRule) routeMaps() []RouteMap { - var result []RouteMap +func (i *importRule) routeMaps() []routeMap { + var result []routeMap - order := RouteMapOrderSeed + order := routeMapOrderSeed byName := byName(i.prefixLists()) names := []string{} @@ -302,24 +295,24 @@ func (i *importRule) routeMaps() []RouteMap { matchVrf := fmt.Sprintf("match source-vrf %s", prefixList.SourceVRF) matchPfxList := fmt.Sprintf("match %s address prefix-list %s", prefixList.AddressFamily, n) entries := []string{matchVrf, matchPfxList} - if strings.HasSuffix(n, IPPrefixListNoExportSuffix) { + if strings.HasSuffix(n, ipPrefixListNoExportSuffix) { entries = 
append(entries, "set community additive no-export") } - routeMap := RouteMap{ + routeMap := routeMap{ Name: routeMapName(i.TargetVRF), - Policy: Permit.String(), + Policy: permit, Order: order, Entries: entries, } - order += RouteMapOrderSeed + order += routeMapOrderSeed result = append(result, routeMap) } - routeMap := RouteMap{ + routeMap := routeMap{ Name: routeMapName(i.TargetVRF), - Policy: Deny.String(), + Policy: deny, Order: order, } @@ -333,18 +326,18 @@ func routeMapName(vrfName string) string { } func (i *importPrefix) buildSpecs(seq int) []string { - var result []string - var spec string + var ( + result []string + spec string + ) if i.Prefix.Bits() == 0 { spec = fmt.Sprintf("%s %s", i.Policy, i.Prefix) - } else { spec = fmt.Sprintf("seq %d %s %s le %d", seq, i.Policy, i.Prefix, i.Prefix.Addr().BitLen()) } result = append(result, spec) - return result } @@ -355,7 +348,7 @@ func (i *importPrefix) name(targetVrf string, isExported bool) string { suffix = "-ipv6" } if !isExported { - suffix += IPPrefixListNoExportSuffix + suffix += ipPrefixListNoExportSuffix } return fmt.Sprintf("%s-import-from-%s%s", targetVrf, i.SourceVRF, suffix) diff --git a/pkg/frr/routemap_test.go b/pkg/frr/routemap_test.go new file mode 100644 index 0000000..9e03b5d --- /dev/null +++ b/pkg/frr/routemap_test.go @@ -0,0 +1,61 @@ +package frr + +import ( + "log/slog" + "net/netip" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/stretchr/testify/require" +) + +func Test_importRulesForNetwork(t *testing.T) { + log := slog.Default() + + tests := []struct { + name string + cfg *Config + network *apiv2.MachineNetwork + want *importRule + }{ + { + name: "primary private network of a firewall", + cfg: &Config{ + Log: log, + Network: network.New(firewallAllocation), + }, + network: firewallAllocation.Networks[0], + want: 
&importRule{ + TargetVRF: vrfNameOf(firewallAllocation.Networks[0]), + ImportVRFs: []string{ + vrfNameOf(firewallAllocation.Networks[2]), + vrfNameOf(firewallAllocation.Networks[4]), + vrfNameOf(firewallAllocation.Networks[1]), + }, + ImportPrefixes: []importPrefix{ + {Prefix: netip.MustParsePrefix("0.0.0.0/0"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[2])}, + {Prefix: netip.MustParsePrefix("100.127.1.0/24"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[4])}, + {Prefix: netip.MustParsePrefix("185.1.2.3/32"), Policy: deny, SourceVRF: vrfNameOf(firewallAllocation.Networks[2])}, + {Prefix: netip.MustParsePrefix("185.1.2.0/24"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[2])}, + {Prefix: netip.MustParsePrefix("185.27.0.0/22"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[2])}, + {Prefix: netip.MustParsePrefix("100.127.129.0/24"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[4])}, + {Prefix: netip.MustParsePrefix("10.0.18.0/22"), Policy: permit, SourceVRF: vrfNameOf(firewallAllocation.Networks[1])}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := importRulesForNetwork(tt.cfg, tt.network) + require.NoError(t, err) + + if diff := cmp.Diff(tt.want, got, cmp.AllowUnexported(netip.Prefix{}), cmpopts.IgnoreUnexported(netip.Addr{})); diff != "" { + t.Errorf("importRulesForNetwork() diff = %s", diff) + } + }) + } +} diff --git a/pkg/network/testdata/frr.conf.firewall b/pkg/frr/test/frr.conf.firewall similarity index 98% rename from pkg/network/testdata/frr.conf.firewall rename to pkg/frr/test/frr.conf.firewall index e684dba..eba5aae 100644 --- a/pkg/network/testdata/frr.conf.firewall +++ b/pkg/frr/test/frr.conf.firewall @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -205,4 +204,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! diff --git a/pkg/network/testdata/frr.conf.firewall_dualstack b/pkg/frr/test/frr.conf.firewall_dualstack similarity index 98% rename from pkg/network/testdata/frr.conf.firewall_dualstack rename to pkg/frr/test/frr.conf.firewall_dualstack index 3a2c140..25e29f5 100644 --- a/pkg/network/testdata/frr.conf.firewall_dualstack +++ b/pkg/frr/test/frr.conf.firewall_dualstack @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -215,4 +214,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! diff --git a/pkg/network/testdata/frr.conf.firewall_frr-10 b/pkg/frr/test/frr.conf.firewall_frr-10 similarity index 98% rename from pkg/network/testdata/frr.conf.firewall_frr-10 rename to pkg/frr/test/frr.conf.firewall_frr-10 index 45a2e01..6400deb 100644 --- a/pkg/network/testdata/frr.conf.firewall_frr-10 +++ b/pkg/frr/test/frr.conf.firewall_frr-10 @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -209,4 +208,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! 
diff --git a/pkg/network/testdata/frr.conf.firewall_frr-9 b/pkg/frr/test/frr.conf.firewall_frr-9 similarity index 98% rename from pkg/network/testdata/frr.conf.firewall_frr-9 rename to pkg/frr/test/frr.conf.firewall_frr-9 index e684dba..eba5aae 100644 --- a/pkg/network/testdata/frr.conf.firewall_frr-9 +++ b/pkg/frr/test/frr.conf.firewall_frr-9 @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -205,4 +204,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! diff --git a/pkg/network/testdata/frr.conf.firewall_ipv6 b/pkg/frr/test/frr.conf.firewall_ipv6 similarity index 98% rename from pkg/network/testdata/frr.conf.firewall_ipv6 rename to pkg/frr/test/frr.conf.firewall_ipv6 index 984ffed..fb259f9 100644 --- a/pkg/network/testdata/frr.conf.firewall_ipv6 +++ b/pkg/frr/test/frr.conf.firewall_ipv6 @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -206,4 +205,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! diff --git a/pkg/network/testdata/frr.conf.firewall_shared b/pkg/frr/test/frr.conf.firewall_shared similarity index 96% rename from pkg/network/testdata/frr.conf.firewall_shared rename to pkg/frr/test/frr.conf.firewall_shared index 67cead6..92e27cb 100644 --- a/pkg/network/testdata/frr.conf.firewall_shared +++ b/pkg/frr/test/frr.conf.firewall_shared @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer frr version 8.5 frr defaults datacenter hostname firewall @@ -124,4 +123,4 @@ route-map LOOPBACKS permit 10 bgp as-path access-list SELF permit ^$ ! line vty -! \ No newline at end of file +! diff --git a/pkg/network/testdata/frr.conf.machine b/pkg/frr/test/frr.conf.machine similarity index 91% rename from pkg/network/testdata/frr.conf.machine rename to pkg/frr/test/frr.conf.machine index d1daa28..686462e 100644 --- a/pkg/network/testdata/frr.conf.machine +++ b/pkg/frr/test/frr.conf.machine @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer frr version 8.5 frr defaults datacenter hostname machine @@ -57,4 +56,4 @@ route-map only-self-out permit 10 match as-path SELF ! route-map only-self-out deny 99 -! \ No newline at end of file +! diff --git a/pkg/installer/config.go b/pkg/installer/config.go new file mode 100644 index 0000000..f816c18 --- /dev/null +++ b/pkg/installer/config.go @@ -0,0 +1,65 @@ +package installer + +import ( + "context" + "fmt" + "os" + + "buf.build/go/protoyaml" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "go.yaml.in/yaml/v3" +) + +// ReadConfigurations returns the configuration that were provided from the metal-hammer, which +// were persisted as configuration files on the disk. +// The installer must have run before calling this function, otherwise the files are not there! 
+func ReadConfigurations() (*v1.MachineDetails, *apiv2.MachineAllocation, error) { + data, err := os.ReadFile(v1.MachineDetailsPath) + if err != nil { + return nil, nil, fmt.Errorf("unable to read machine details: %w", err) + } + + var details v1.MachineDetails + if err = yaml.Unmarshal(data, &details); err != nil { + return nil, nil, fmt.Errorf("unable to parse machine details: %w", err) + } + + data, err = os.ReadFile(v1.MachineAllocationPath) + if err != nil { + return nil, nil, fmt.Errorf("unable to read machine allocation: %w", err) + } + + var allocation apiv2.MachineAllocation + if err = protoyaml.Unmarshal(data, &allocation); err != nil { + return nil, nil, fmt.Errorf("unable to parse machine allocation: %w", err) + } + + return &details, &allocation, nil +} + +// persistConfigurations writes the configuration data provided from the metal-hammer to the os. +// these can be used again for other applications like the firewall-controller at a later point in time. +func (i *installer) persistConfigurations(context.Context) error { + detailsBytes, err := yaml.Marshal(i.details) + if err != nil { + return fmt.Errorf("unable to marshal machine details: %w", err) + } + + err = i.fs.WriteFile(v1.MachineDetailsPath, detailsBytes, os.ModePerm) + if err != nil { + return fmt.Errorf("unable to persist machine details: %w", err) + } + + allocationBytes, err := protoyaml.Marshal(i.allocation) + if err != nil { + return fmt.Errorf("unable to marshal machine allocation: %w", err) + } + + err = i.fs.WriteFile(v1.MachineAllocationPath, allocationBytes, os.ModePerm) + if err != nil { + return fmt.Errorf("unable to persist machine allocation: %w", err) + } + + return nil +} diff --git a/pkg/installer/installer.go b/pkg/installer/installer.go new file mode 100644 index 0000000..193bb80 --- /dev/null +++ b/pkg/installer/installer.go @@ -0,0 +1,248 @@ +package installer + +import ( + "context" + "fmt" + "log/slog" + "slices" + "time" + + apiv2 
"github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + operatingsystem "github.com/metal-stack/os-installer/pkg/installer/os" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/spf13/afero" + "go.yaml.in/yaml/v3" +) + +type installer struct { + log *slog.Logger + cfg *v1.Config + oss oscommon.OperatingSystem + fs *afero.Afero + exec *exec.CmdExecutor + details *v1.MachineDetails + allocation *apiv2.MachineAllocation +} + +func New(log *slog.Logger, details *v1.MachineDetails, allocation *apiv2.MachineAllocation) *installer { + log = log.WithGroup("os-installer") + + return &installer{ + log: log, + cfg: &v1.Config{}, + fs: &afero.Afero{ + Fs: afero.OsFs{}, + }, + details: details, + allocation: allocation, + exec: exec.New(log), + } +} + +func (i *installer) Install(ctx context.Context) error { + var ( + start = time.Now() + installerConfig = &v1.Config{} + ) + + if oscommon.FileExists(i.fs, v1.InstallerConfigPath) { + data, err := i.fs.ReadFile(v1.InstallerConfigPath) + if err != nil { + return fmt.Errorf("unable to read installer config: %w", err) + } + + if err = yaml.Unmarshal(data, &installerConfig); err != nil { + return fmt.Errorf("unable to parse installer config: %w", err) + } + } + + oss, err := operatingsystem.New(&oscommon.Config{ + Log: i.log, + Exec: i.exec, + Fs: i.fs, + MachineDetails: i.details, + Allocation: i.allocation, + Name: installerConfig.OsName, + BootloaderID: installerConfig.Overwrites.BootloaderID, + }) + if err != nil { + return fmt.Errorf("os detection failed: %w", err) + } + + i.cfg = installerConfig + i.oss = oss + + if err = i.run(ctx); err != nil { + i.log.Error("running os installer failed", "took", time.Since(start).String()) + return fmt.Errorf("os installer failed: %w", err) + } + + i.log.Info("os installer succeeded", "took", time.Since(start).String()) + + return nil +} + +func (i *installer) 
run(ctx context.Context) error { + var ( + cmdLine string + ) + + for _, task := range []struct { + name string + fn func(ctx context.Context) error + }{ + { + name: "persist configuration data from metal-hammer", + fn: i.persistConfigurations, + }, + { + name: "check if running in efi mode", + fn: i.validateRunningInEfiMode, + }, + { + name: "remove .dockerenv if running in virtual environment", + fn: i.removeDockerEnv, + }, + { + name: "write hostname", + fn: i.oss.WriteHostname, + }, + { + name: "write /etc/hosts", + fn: i.oss.WriteHosts, + }, + { + name: "write /etc/resolv.conf", + fn: i.oss.WriteResolvConf, + }, + { + name: "write ntp configuration", + fn: i.oss.WriteNTPConf, + }, + { + name: "create metal user", + fn: i.oss.CreateMetalUser, + }, + { + name: "configure network", + fn: i.oss.ConfigureNetwork, + }, + { + name: "authorized ssh keys", + fn: i.oss.CopySSHKeys, + }, + { + name: "fix wrong filesystem permissions", + fn: i.oss.FixPermissions, + }, + { + name: "process userdata", + fn: i.oss.ProcessUserdata, + }, + { + name: "build kernel cmdline", + fn: func(ctx context.Context) error { + l, err := i.oss.BuildCMDLine(ctx) + if err != nil { + return err + } + + cmdLine = l + + return nil + }, + }, + { + name: "write /etc/metal/boot-info.yaml", + fn: func(ctx context.Context) error { + return i.oss.WriteBootInfo(ctx, cmdLine) + }, + }, + { + name: "write booatloader config", + fn: func(ctx context.Context) error { + return i.oss.GrubInstall(ctx, cmdLine) + }, + }, + { + name: "unset machine id", + fn: i.oss.UnsetMachineID, + }, + { + name: "deploy systemd services", + fn: i.oss.SystemdServices, + }, + { + name: "write /etc/metal/build-meta.yaml", + fn: i.oss.WriteBuildMeta, + }, + { + name: "execute custom executable", + fn: i.customExecutable, + }, + } { + var ( + log = i.log.With("task-name", task.name) + start = time.Now() + ) + + if len(i.cfg.Only) > 0 && !slices.Contains(i.cfg.Only, task.name) { + log.Warn("skipping task as defined by installer 
configuration") + continue + } + + if slices.Contains(i.cfg.Except, task.name) { + log.Warn("skipping task as defined by installer configuration") + continue + } + + log.Info("running install task", "start-at", start.String()) + + if err := task.fn(ctx); err != nil { + i.log.Error("running install task failed", "error", err, "took", time.Since(start).String()) + return fmt.Errorf("installation task failed, aborting install: %w", err) + } + } + + return nil +} + +func (i *installer) validateRunningInEfiMode(ctx context.Context) error { + if !i.isVirtual() && !oscommon.FileExists(i.fs, "/sys/firmware/efi") { + return fmt.Errorf("not running efi mode") + } + + return nil +} + +func (i *installer) removeDockerEnv(_ context.Context) error { + // systemd-detect-virt guesses docker which modifies the behavior of many services. + if !oscommon.FileExists(i.fs, "/.dockerenv") { + return nil + } + + return i.fs.Remove("/.dockerenv") +} + +func (i *installer) isVirtual() bool { + return !oscommon.FileExists(i.fs, "/sys/class/dmi") +} + +func (i *installer) customExecutable(ctx context.Context) error { + if i.cfg.CustomScript == nil { + i.log.Info("no custom executable to execute, skipping") + return nil + } + + _, err := i.exec.Execute(ctx, &exec.Params{ + Name: i.cfg.CustomScript.ExecutablePath, + Dir: i.cfg.CustomScript.WorkDir, + }) + if err != nil { + return fmt.Errorf("custom executable returned an error code: %w", err) + } + + return nil +} diff --git a/pkg/installer/installer_test.go b/pkg/installer/installer_test.go new file mode 100644 index 0000000..5aa0659 --- /dev/null +++ b/pkg/installer/installer_test.go @@ -0,0 +1,59 @@ +package installer + +import ( + "fmt" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +func Test_installer_validateRunningInEfiMode(t *testing.T) { + tests := []struct { + name string + fsMocks func(fs 
*afero.Afero) + wantErr error + }{ + { + name: "is efi", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/sys/firmware/efi", []byte(""), 0755)) + require.NoError(t, fs.WriteFile("/sys/class/dmi", []byte(""), 0755)) + }, + wantErr: nil, + }, + { + name: "is not efi", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/sys/class/dmi", []byte(""), 0755)) + }, + wantErr: fmt.Errorf("not running efi mode"), + }, + { + name: "is not efi but virtual", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + i := &installer{ + log: slog.Default(), + fs: &afero.Afero{ + Fs: afero.NewMemMapFs(), + }, + } + + if tt.fsMocks != nil { + tt.fsMocks(i.fs) + } + + err := i.validateRunningInEfiMode(t.Context()) + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n %s", diff) + } + }) + } +} diff --git a/pkg/installer/legacy.go b/pkg/installer/legacy.go new file mode 100644 index 0000000..138f688 --- /dev/null +++ b/pkg/installer/legacy.go @@ -0,0 +1,35 @@ +package installer + +import ( + "fmt" + "os" + + v1 "github.com/metal-stack/os-installer/api/v1" + "go.yaml.in/yaml/v3" +) + +func (i *installer) PersistLegacyInstallYaml(installConfig *v1.InstallerConfig) error { + installBytes, err := yaml.Marshal(installConfig) + if err != nil { + return fmt.Errorf("unable to marshal legacy installer config: %w", err) + } + err = i.fs.WriteFile(v1.LegacyInstallPath, installBytes, os.ModePerm) + if err != nil { + return fmt.Errorf("unable to persist legacy installer config: %w", err) + } + return nil +} + +func ReadLegacyInstallYaml() (*v1.InstallerConfig, error) { + data, err := os.ReadFile(v1.LegacyInstallPath) + if err != nil { + return nil, fmt.Errorf("unable to read legacy installer config: %w", err) + } + + var installConfig v1.InstallerConfig + if err = yaml.Unmarshal(data, &installConfig); err != nil { + return nil, fmt.Errorf("unable to parse 
legacy installer config: %w", err) + } + + return &installConfig, nil +} diff --git a/pkg/installer/os/almalinux/almalinux.go b/pkg/installer/os/almalinux/almalinux.go new file mode 100644 index 0000000..85c22d9 --- /dev/null +++ b/pkg/installer/os/almalinux/almalinux.go @@ -0,0 +1,53 @@ +package almalinux + +import ( + "context" + "log/slog" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/spf13/afero" +) + +type ( + Os struct { + *oscommon.CommonTasks + log *slog.Logger + details *v1.MachineDetails + allocation *apiv2.MachineAllocation + exec *exec.CmdExecutor + network *network.Network + fs *afero.Afero + } +) + +func New(cfg *oscommon.Config) *Os { + return &Os{ + CommonTasks: oscommon.New(cfg), + log: cfg.Log, + details: cfg.MachineDetails, + allocation: cfg.Allocation, + exec: cfg.Exec, + network: network.New(cfg.Allocation), + fs: cfg.Fs, + } +} + +func (o *Os) SudoGroup() string { + return "wheel" +} + +func (o *Os) BootloaderID() string { + return "almalinux" +} + +func (o *Os) InitramdiskFormatString() string { + return "initramfs-%s.img" +} + +func (o *Os) WriteBootInfo(ctx context.Context, cmdLine string) error { + return o.CommonTasks.WriteBootInfo(ctx, o.InitramdiskFormatString(), o.BootloaderID(), cmdLine) +} diff --git a/pkg/installer/os/almalinux/create_metal_user.go b/pkg/installer/os/almalinux/create_metal_user.go new file mode 100644 index 0000000..2286bd0 --- /dev/null +++ b/pkg/installer/os/almalinux/create_metal_user.go @@ -0,0 +1,28 @@ +package almalinux + +import ( + "context" + "time" + + "github.com/metal-stack/os-installer/pkg/exec" +) + +func (o *Os) CreateMetalUser(ctx context.Context) error { + err := o.CommonTasks.CreateMetalUser(ctx, o.SudoGroup()) + if err != nil { + return err + } + + 
// otherwise in rescue mode the root account is locked + _, err = o.exec.Execute(ctx, &exec.Params{ + Name: "passwd", + Args: []string{"root"}, + Timeout: 10 * time.Second, + Stdin: o.details.Password + "\n" + o.details.Password + "\n", + }) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/installer/os/almalinux/install_bootloader.go b/pkg/installer/os/almalinux/install_bootloader.go new file mode 100644 index 0000000..793004f --- /dev/null +++ b/pkg/installer/os/almalinux/install_bootloader.go @@ -0,0 +1,120 @@ +package almalinux + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/metal-stack/os-installer/pkg/exec" +) + +const ( + DefaultGrubPath = "/etc/default/grub" + GrubConfigPath = "/boot/efi/EFI/almalinux/grub.cfg" + defaultGrubFileContent = `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=%s +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="%s" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=%s --unit=%s --word=8" +GRUB_DEVICE=UUID=%s +GRUB_ENABLE_BLSCFG=false +` +) + +func (o *Os) GrubInstall(ctx context.Context, cmdLine string) error { + serialPort, serialSpeed, err := o.FigureOutSerialSpeed() + if err != nil { + return err + } + + defaultGrub := fmt.Sprintf(defaultGrubFileContent, o.BootloaderID(), cmdLine, serialSpeed, serialPort, o.details.RootUUID) + + err = o.fs.WriteFile(DefaultGrubPath, []byte(defaultGrub), 0755) + if err != nil { + return err + } + + _, err = o.exec.Execute(ctx, &exec.Params{ + Name: "grub2-mkconfig", + Args: []string{"-o", GrubConfigPath}, + }) + if err != nil { + return err + } + + if o.details.RaidEnabled { + out, err := o.exec.Execute(ctx, &exec.Params{ + Name: "mdadm", + Args: []string{"--examine", "--scan"}, + Timeout: 10 * time.Second, + }) + if err != nil { + return err + } + + out += "\nMAILADDR root\n" + + err = o.fs.WriteFile("/etc/mdadm.conf", []byte(out), 0700) + if err != nil { + return err + } + + out, err = o.exec.Execute(ctx, &exec.Params{ + Name: "blkid", + }) + 
if err != nil { + return err + } + + for line := range strings.SplitSeq(string(out), "\n") { + if strings.Contains(line, `PARTLABEL="efi"`) { + disk, _, found := strings.Cut(line, ":") + if !found { + return fmt.Errorf("unable to process blkid output lines") + } + + shim := fmt.Sprintf(`\\EFI\\%s\\shimx64.efi`, o.BootloaderID()) + + _, err = o.exec.Execute(ctx, &exec.Params{ + Name: "efibootmgr", + Args: []string{"-c", "-d", disk, "-p1", "-l", shim, "-L", o.BootloaderID()}, + }) + if err != nil { + return err + } + } + } + } + + if !o.details.RaidEnabled { + return nil + } + + v, err := o.GetKernelVersion(o.InitramdiskFormatString()) + if err != nil { + return err + } + + _, err = o.exec.Execute(ctx, &exec.Params{ + Name: "dracut", + Args: []string{ + "--mdadmconf", + "--kver", v, + "--kmoddir", "/lib/modules/" + v, + "--include", "/lib/modules/" + v, "/lib/modules/" + v, + "--fstab", + "--add=dm mdraid", + "--add-drivers=raid0 raid1", + "--hostonly", + "--force", + }, + }) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/installer/os/almalinux/tests/almalinux_test.go b/pkg/installer/os/almalinux/tests/almalinux_test.go new file mode 100644 index 0000000..7821fa3 --- /dev/null +++ b/pkg/installer/os/almalinux/tests/almalinux_test.go @@ -0,0 +1,26 @@ +package almalinux_test + +import ( + "encoding/json" + "fmt" + goos "os" + "testing" + + "github.com/metal-stack/os-installer/pkg/test" + "github.com/stretchr/testify/require" +) + +func TestHelperProcess(t *testing.T) { + if goos.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + var f test.FakeExecParams + err := json.Unmarshal([]byte(goos.Args[3]), &f) + require.NoError(t, err) + + _, err = fmt.Fprint(goos.Stdout, f.Output) + require.NoError(t, err) + + goos.Exit(f.ExitCode) +} diff --git a/pkg/installer/os/almalinux/tests/create_metal_user_test.go b/pkg/installer/os/almalinux/tests/create_metal_user_test.go new file mode 100644 index 0000000..3aae01b --- /dev/null +++ 
b/pkg/installer/os/almalinux/tests/create_metal_user_test.go @@ -0,0 +1,117 @@ +package almalinux_test + +import ( + "log/slog" + "os/user" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/metal-stack/os-installer/pkg/installer/os/almalinux" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" +) + +func Test_os_CreateMetalUser(t *testing.T) { + tests := []struct { + name string + details *v1.MachineDetails + execMocks []test.FakeExecParams + lookupUserFn oscommon.LookupUserFn + want string + wantErr error + }{ + { + name: "create user already exists", + details: &v1.MachineDetails{ + Password: "abc", + }, + lookupUserFn: func(name string) (*user.User, error) { + return &user.User{ + Uid: "1000", + Gid: "1000", + Username: oscommon.MetalUser, + Name: oscommon.MetalUser, + HomeDir: "/home/metal", + }, nil + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"userdel", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"useradd", "--create-home", "--uid", "1000", "--gid", "wheel", "--shell", "/bin/bash", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"passwd", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"passwd", "root"}, + Output: "", + ExitCode: 0, + }, + }, + }, + { + name: "create user does not yet exist", + details: &v1.MachineDetails{ + Password: "abc", + }, + lookupUserFn: func(name string) (*user.User, error) { + return nil, user.UnknownUserError(oscommon.MetalUser) + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"useradd", "--create-home", "--uid", "1000", "--gid", "wheel", "--shell", "/bin/bash", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"passwd", oscommon.MetalUser}, + Output: "", + ExitCode: 0, 
+ }, + { + WantCmd: []string{"passwd", "root"}, + Output: "", + ExitCode: 0, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + d := almalinux.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + LookupUserFn: tt.lookupUserFn, + }) + + gotErr := d.CreateMetalUser(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + }) + } +} diff --git a/pkg/installer/os/almalinux/tests/install_bootloader_test.go b/pkg/installer/os/almalinux/tests/install_bootloader_test.go new file mode 100644 index 0000000..8cc5072 --- /dev/null +++ b/pkg/installer/os/almalinux/tests/install_bootloader_test.go @@ -0,0 +1,172 @@ +package almalinux_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/metal-stack/os-installer/pkg/installer/os/almalinux" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + sampleMdadmScanOutput = `ARRAY /dev/md/0 metadata=1.0 UUID=42d10089:ee1e0399:445e7550:62b63ec8 name=any:0 +ARRAY /dev/md/1 metadata=1.0 UUID=543eb7f8:98d4d986:e669824d:bebe69e5 name=any:1 +ARRAY /dev/md/2 metadata=1.0 UUID=fc32a6f0:ee40d9db:87c8c9f3:a8400c8b name=any:2` + + sampleBlkidOutput = `/dev/sda1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="cc57c456-0b2f-6345-c597-d861cc6dd8ac" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="273985c8-d097-4123-bcd0-80b4e4e14728" +/dev/sda2: 
UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="54748c60-b566-f391-142c-fb78bb1fc6a9" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="d7863f4e-af7c-47fc-8c03-6ecdc69bc72d" +/dev/sda3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="582e9b4f-f191-e01e-85fd-2f7d969fbef6" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="e8b44f09-b7f7-4e0d-a7c3-d909617d1f05" +/dev/sdb1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="61bd5d8b-1bb8-673b-9e61-8c28dccc3812" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="13a4c568-57b0-4259-9927-9ac023aaa5f0" +/dev/sdb2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="e7d01e93-9340-5b90-68f8-d8f815595132" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="ab11cd86-37b8-4bae-81e5-21fe0a9c9ae0" +/dev/sdb3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="764217ad-1591-a83a-c799-23397f968729" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="9afbf9c1-b2ba-4b46-8db1-e802d26c93b6" +/dev/md1: LABEL="root" UUID="ace079b5-06be-4429-bbf0-081ea4d7d0d9" TYPE="ext4" +/dev/md0: LABEL="efi" UUID="C236-297F" TYPE="vfat" +/dev/md2: LABEL="varlib" UUID="385e8e8e-dbfd-481e-93a4-cba7f4d5fa02" TYPE="ext4"` +) + +func Test_os_GrubInstall(t *testing.T) { + tests := []struct { + name string + cmdLine string + details *v1.MachineDetails + fsMocks func(fs *afero.Afero) + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "without raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + Console: "ttyS1,115200n8", + RootUUID: "78cd4dfe-8825-4f45-816e-d284adb0261e", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"grub2-mkconfig", "-o", almalinux.GrubConfigPath}, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=almalinux 
+GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +GRUB_DEVICE=UUID=78cd4dfe-8825-4f45-816e-d284adb0261e +GRUB_ENABLE_BLSCFG=false +`, + }, + { + name: "with raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + RaidEnabled: true, + RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9", + Console: "ttyS1,115200n8", + }, + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-5.14.0-503.19.1.el9_5.x86_64", []byte{}, 0600)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-5.14.0-503.19.1.el9_5.x86_64", []byte{}, 0755)) + require.NoError(t, fs.WriteFile("/boot/initramfs-5.14.0-503.19.1.el9_5.x86_64.img", []byte{}, 0600)) + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"grub2-mkconfig", "-o", almalinux.GrubConfigPath}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"mdadm", "--examine", "--scan"}, + Output: sampleMdadmScanOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"blkid"}, + Output: sampleBlkidOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sda1", "-p1", "-l", "\\\\EFI\\\\almalinux\\\\shimx64.efi", "-L", "almalinux"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sdb1", "-p1", "-l", "\\\\EFI\\\\almalinux\\\\shimx64.efi", "-L", "almalinux"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{ + "dracut", + "--mdadmconf", + "--kver", "5.14.0-503.19.1.el9_5.x86_64", + "--kmoddir", "/lib/modules/5.14.0-503.19.1.el9_5.x86_64", + "--include", "/lib/modules/5.14.0-503.19.1.el9_5.x86_64", "/lib/modules/5.14.0-503.19.1.el9_5.x86_64", + "--fstab", + "--add=dm mdraid", + 
"--add-drivers=raid0 raid1", + "--hostonly", + "--force", + }, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=almalinux +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +GRUB_DEVICE=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 +GRUB_ENABLE_BLSCFG=false +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := almalinux.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + gotErr := d.GrubInstall(t.Context(), tt.cmdLine) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(almalinux.DefaultGrubPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/almalinux/tests/write_ntp_conf_test.go b/pkg/installer/os/almalinux/tests/write_ntp_conf_test.go new file mode 100644 index 0000000..96b58f6 --- /dev/null +++ b/pkg/installer/os/almalinux/tests/write_ntp_conf_test.go @@ -0,0 +1,134 @@ +package almalinux_test + +import ( + "fmt" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/metal-stack/os-installer/pkg/installer/os/almalinux" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteNTPConf(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + fsMocks func(fs *afero.Afero) + want string + wantErr error + }{ + { + name: "configure custom ntp", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(almalinux.ChronyConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + NtpServers: []*apiv2.NTPServer{ + {Address: "custom.1.ntp.org"}, + {Address: "custom.2.ntp.org"}, + }, + }, + want: `# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usable directives. + +# In case no custom NTP server is provided +# Cloudflare offers a free public time service that allows us to use their +# anycast network of 180+ locations to synchronize time from their closest server. +# See https://blog.cloudflare.com/secure-time/ +pool custom.1.ntp.org iburst +pool custom.2.ntp.org iburst + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. 
+makestep 1 3 +`, + wantErr: nil, + }, + { + name: "use default ntp", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(almalinux.ChronyConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + }, + want: "", + wantErr: nil, + }, + { + name: "firewalls are not possible", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(almalinux.ChronyConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + }, + want: "", + wantErr: fmt.Errorf("almalinux as firewall is currently not supported"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := almalinux.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteNTPConf(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(almalinux.ChronyConfigPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/almalinux/write_ntp_conf.go b/pkg/installer/os/almalinux/write_ntp_conf.go new file mode 100644 index 0000000..5168c76 --- /dev/null +++ b/pkg/installer/os/almalinux/write_ntp_conf.go @@ -0,0 +1,45 @@ +package almalinux + +import ( + "context" + "fmt" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/services/chrony" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" +) + +const ( + ChronyConfigPath = "/etc/chrony.conf" +) + +func (o *Os) 
WriteNTPConf(ctx context.Context) error { + if o.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + return fmt.Errorf("almalinux as firewall is currently not supported") + } + + if len(o.allocation.NtpServers) == 0 { + return nil + } + + var ntpServers []string + + for _, ntp := range o.allocation.NtpServers { + ntpServers = append(ntpServers, ntp.Address) + } + + r, err := renderer.New(&renderer.Config{ + Log: o.log, + TemplateString: chrony.ChronyConfigTemplateString, + Data: chrony.TemplateData{ + NTPServers: ntpServers, + }, + Fs: o.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, ChronyConfigPath) + return err +} diff --git a/pkg/installer/os/common/cmd_line.go b/pkg/installer/os/common/cmd_line.go new file mode 100644 index 0000000..a2a25ac --- /dev/null +++ b/pkg/installer/os/common/cmd_line.go @@ -0,0 +1,99 @@ +package oscommon + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/metal-stack/os-installer/pkg/exec" +) + +func (d *CommonTasks) BuildCMDLine(ctx context.Context) (string, error) { + parts := []string{ + fmt.Sprintf("console=%s", d.details.Console), + fmt.Sprintf("root=UUID=%s", d.details.RootUUID), + "init=/sbin/init", + "net.ifnames=0", + "biosdevname=0", + "nvme_core.io_timeout=300", // 300 sec should be enough for firewalls to be replaced + } + + mdUUID, found, err := d.findMDUUID(ctx) + if err != nil { + return "", err + } + + if found { + mdParts := []string{ + "rdloaddriver=raid0", + "rdloaddriver=raid1", + fmt.Sprintf("rd.md.uuid=%s", mdUUID), + } + + parts = append(parts, mdParts...) 
+	}
+
+	return strings.Join(parts, " "), nil
+}
+
+func (d *CommonTasks) findMDUUID(ctx context.Context) (mdUUID string, found bool, err error) {
+	d.log.Debug("detect software raid uuid")
+
+	if !d.details.RaidEnabled {
+		return "", false, nil
+	}
+
+	if d.details.RootUUID == "" { // validate input before spawning an external process
+		return "", false, fmt.Errorf("no root uuid set in machine details")
+	}
+
+	blkidOut, err := d.exec.Execute(ctx, &exec.Params{
+		Name:    "blkid",
+		Timeout: 10 * time.Second,
+	})
+	if err != nil {
+		return "", false, fmt.Errorf("unable to run blkid: %w", err)
+	}
+
+	var (
+		rootUUID = d.details.RootUUID
+		rootDisk string
+	)
+
+	for line := range strings.SplitSeq(blkidOut, "\n") {
+		if strings.Contains(line, rootUUID) {
+			rd, _, found := strings.Cut(line, ":")
+			if found {
+				rootDisk = strings.TrimSpace(rd)
+				break
+			}
+		}
+	}
+	if rootDisk == "" {
+		return "", false, fmt.Errorf("unable to detect rootdisk")
+	}
+
+	mdadmOut, err := d.exec.Execute(ctx, &exec.Params{
+		Name:    "mdadm",
+		Args:    []string{"--detail", "--export", rootDisk},
+		Timeout: 10 * time.Second,
+	})
+	if err != nil {
+		return "", false, fmt.Errorf("unable to run mdadm: %w", err)
+	}
+
+	for line := range strings.SplitSeq(mdadmOut, "\n") {
+		_, md, found := strings.Cut(line, "MD_UUID=")
+		if found {
+			mdUUID = md
+			break
+		}
+	}
+
+	if mdUUID == "" {
+		return "", false, fmt.Errorf("unable to detect md root disk")
+	}
+
+	return mdUUID, true, nil
+}
diff --git a/pkg/installer/os/common/cmd_line_test.go b/pkg/installer/os/common/cmd_line_test.go
new file mode 100644
index 0000000..5d3d30b
--- /dev/null
+++ b/pkg/installer/os/common/cmd_line_test.go
@@ -0,0 +1,108 @@
+package oscommon
+
+import (
+	"log/slog"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	v1 "github.com/metal-stack/os-installer/api/v1"
+	"github.com/metal-stack/os-installer/pkg/exec"
+	"github.com/metal-stack/os-installer/pkg/test"
+	"github.com/spf13/afero"
+)
+
+const (
+	sampleBlkidOutput = `/dev/sda1: 
UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="cc57c456-0b2f-6345-c597-d861cc6dd8ac" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="273985c8-d097-4123-bcd0-80b4e4e14728" +/dev/sda2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="54748c60-b566-f391-142c-fb78bb1fc6a9" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="d7863f4e-af7c-47fc-8c03-6ecdc69bc72d" +/dev/sda3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="582e9b4f-f191-e01e-85fd-2f7d969fbef6" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="e8b44f09-b7f7-4e0d-a7c3-d909617d1f05" +/dev/sdb1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="61bd5d8b-1bb8-673b-9e61-8c28dccc3812" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="13a4c568-57b0-4259-9927-9ac023aaa5f0" +/dev/sdb2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="e7d01e93-9340-5b90-68f8-d8f815595132" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="ab11cd86-37b8-4bae-81e5-21fe0a9c9ae0" +/dev/sdb3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="764217ad-1591-a83a-c799-23397f968729" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="9afbf9c1-b2ba-4b46-8db1-e802d26c93b6" +/dev/md1: LABEL="root" UUID="ace079b5-06be-4429-bbf0-081ea4d7d0d9" TYPE="ext4" +/dev/md0: LABEL="efi" UUID="C236-297F" TYPE="vfat" +/dev/md2: LABEL="varlib" UUID="385e8e8e-dbfd-481e-93a4-cba7f4d5fa02" TYPE="ext4"` + sampleMdadmDetailOutput = `MD_LEVEL=raid1 +MD_DEVICES=2 +MD_METADATA=1.0 +MD_UUID=543eb7f8:98d4d986:e669824d:bebe69e5 +MD_DEVNAME=1 +MD_NAME=any:1 +MD_DEVICE_dev_sdb2_ROLE=1 +MD_DEVICE_dev_sdb2_DEV=/dev/sdb2 +MD_DEVICE_dev_sda2_ROLE=0 +MD_DEVICE_dev_sda2_DEV=/dev/sda2` +) + +func TestDefaultOS_findMDUUID(t *testing.T) { + tests := []struct { + name string + details *v1.MachineDetails + execMocks []test.FakeExecParams + want string + wantFound bool + wantErr error + }{ + { + name: "no raid", + details: &v1.MachineDetails{ + RaidEnabled: 
false, + }, + want: "", + wantFound: false, + wantErr: nil, + }, + { + name: "with raid", + details: &v1.MachineDetails{ + RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9", + RaidEnabled: true, + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"blkid"}, + Output: sampleBlkidOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"mdadm", "--detail", "--export", "/dev/md1"}, + Output: sampleMdadmDetailOutput, + ExitCode: 0, + }, + }, + want: "543eb7f8:98d4d986:e669824d:bebe69e5", + wantFound: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log := slog.Default() + + d := New(&Config{ + Log: log, + Fs: &afero.Afero{ + Fs: afero.NewMemMapFs(), + }, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + got, gotFound, gotErr := d.findMDUUID(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("diff (+got -want):\n %s", diff) + } + + if diff := cmp.Diff(tt.wantFound, gotFound); diff != "" { + t.Errorf("diff (+got -want):\n %s", diff) + } + }) + } +} diff --git a/pkg/installer/os/common/configure_network.go b/pkg/installer/os/common/configure_network.go new file mode 100644 index 0000000..9db5dcc --- /dev/null +++ b/pkg/installer/os/common/configure_network.go @@ -0,0 +1,46 @@ +package oscommon + +import ( + "context" + "fmt" + + "github.com/metal-stack/os-installer/pkg/frr" + "github.com/metal-stack/os-installer/pkg/interfaces" + "github.com/metal-stack/os-installer/pkg/nftables" +) + +func (d *CommonTasks) ConfigureNetwork(ctx context.Context) error { + if err := interfaces.ConfigureInterfaces(ctx, &interfaces.Config{ + Log: d.log, + Network: d.network, + Nics: d.details.Nics, + }); err != nil { + return fmt.Errorf("error configuring interfaces: %w", err) + 
} + + if _, err := frr.Render(ctx, &frr.Config{ + Log: d.log, + Reload: false, + Validate: true, + Network: d.network, + }); err != nil { + return fmt.Errorf("unable to render frr config: %w", err) + } + + if d.network.IsMachine() { + return nil + } + + if _, err := nftables.Render(ctx, &nftables.Config{ + Log: d.log, + Reload: false, + Validate: true, + Network: d.network, + EnableDNSProxy: false, + ForwardPolicy: nftables.ForwardPolicyDrop, + }); err != nil { + return fmt.Errorf("unable to render nftables config: %w", err) + } + + return nil +} diff --git a/pkg/installer/os/common/copy_ssh_keys.go b/pkg/installer/os/common/copy_ssh_keys.go new file mode 100644 index 0000000..87ebbb3 --- /dev/null +++ b/pkg/installer/os/common/copy_ssh_keys.go @@ -0,0 +1,46 @@ +package oscommon + +import ( + "context" + "path" + "strconv" + "strings" +) + +func (d *CommonTasks) CopySSHKeys(ctx context.Context) error { + u, err := d.lookupUserFn(MetalUser) + if err != nil { + return err + } + + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return err + } + gid, err := strconv.Atoi(u.Gid) + if err != nil { + return err + } + + var ( + sshPath = path.Join(u.HomeDir, ".ssh") + sshAuthorizedKeysPath = path.Join(sshPath, "authorized_keys") + ) + + err = d.fs.MkdirAll(sshPath, 0700) + if err != nil { + return err + } + + err = d.fs.Chown(sshPath, uid, gid) + if err != nil { + return err + } + + err = d.fs.WriteFile(sshAuthorizedKeysPath, []byte(strings.Join(d.allocation.SshPublicKeys, "\n")), 0600) + if err != nil { + return err + } + + return d.fs.Chown(sshAuthorizedKeysPath, uid, gid) +} diff --git a/pkg/installer/os/common/create_metal_user.go b/pkg/installer/os/common/create_metal_user.go new file mode 100644 index 0000000..6501011 --- /dev/null +++ b/pkg/installer/os/common/create_metal_user.go @@ -0,0 +1,59 @@ +package oscommon + +import ( + "context" + "fmt" + "os/user" + "time" + + "github.com/metal-stack/os-installer/pkg/exec" +) + +const ( + MetalUser = "metal" +) + +type 
LookupUserFn func(name string) (*user.User, error) + +func (d *CommonTasks) CreateMetalUser(ctx context.Context, sudoGroup string) error { + u, err := d.lookupUserFn(MetalUser) + if err != nil { + if err.Error() != user.UnknownUserError(MetalUser).Error() { + return err + } + } + + if u != nil { + d.log.Debug("user already exists, recreating") + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "userdel", + Args: []string{MetalUser}, + Timeout: 10 * time.Second, + }) + if err != nil { + return err + } + } + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "useradd", + Args: []string{"--create-home", "--uid", "1000", "--gid", sudoGroup, "--shell", "/bin/bash", MetalUser}, + Timeout: 10 * time.Second, + }) + if err != nil { + return err + } + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "passwd", + Args: []string{MetalUser}, + Timeout: 10 * time.Second, + Stdin: d.details.Password + "\n" + d.details.Password + "\n", + }) + if err != nil { + return fmt.Errorf("unable to set password for metal user: %w", err) + } + + return nil +} diff --git a/pkg/installer/os/common/defaultos.go b/pkg/installer/os/common/defaultos.go new file mode 100644 index 0000000..384bf8b --- /dev/null +++ b/pkg/installer/os/common/defaultos.go @@ -0,0 +1,37 @@ +package oscommon + +import "context" + +type ( + DefaultOS struct { + *CommonTasks + bootloaderID *string + } +) + +func NewDefaultOS(cfg *Config) *DefaultOS { + return &DefaultOS{ + CommonTasks: New(cfg), + bootloaderID: cfg.BootloaderID, + } +} + +func (o *DefaultOS) BootloaderID() string { + if o.bootloaderID == nil { + panic("no bootloader id provided for default os") + } + + return *o.bootloaderID +} + +func (o *DefaultOS) WriteBootInfo(ctx context.Context, cmdLine string) error { + return o.CommonTasks.WriteBootInfo(ctx, o.InitramdiskFormatString(), o.BootloaderID(), cmdLine) +} + +func (o *DefaultOS) CreateMetalUser(ctx context.Context) error { + return o.CommonTasks.CreateMetalUser(ctx, o.SudoGroup()) +} + +func (o 
*DefaultOS) GrubInstall(ctx context.Context, cmdLine string) error { + return o.CommonTasks.GrubInstall(ctx, o.BootloaderID(), cmdLine) +} diff --git a/pkg/installer/os/common/fix_permissions.go b/pkg/installer/os/common/fix_permissions.go new file mode 100644 index 0000000..8409fef --- /dev/null +++ b/pkg/installer/os/common/fix_permissions.go @@ -0,0 +1,19 @@ +package oscommon + +import ( + "context" + "io/fs" +) + +func (d *CommonTasks) FixPermissions(ctx context.Context) error { + for p, perm := range map[string]fs.FileMode{ + "/var/tmp": 01777, + } { + err := d.fs.Chmod(p, perm) + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/installer/os/common/install_bootloader.go b/pkg/installer/os/common/install_bootloader.go new file mode 100644 index 0000000..6a62db4 --- /dev/null +++ b/pkg/installer/os/common/install_bootloader.go @@ -0,0 +1,155 @@ +package oscommon + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/spf13/afero" +) + +const ( + DefaultGrubPath = "/etc/default/grub" + defaultGrubFileContent = `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=%s +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="%s" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=%s --unit=%s --word=8" +` +) + +func (d *CommonTasks) GrubInstall(ctx context.Context, bootloaderID, cmdLine string) error { + serialPort, serialSpeed, err := d.FigureOutSerialSpeed() + if err != nil { + return err + } + + defaultGrub := fmt.Sprintf(defaultGrubFileContent, bootloaderID, cmdLine, serialSpeed, serialPort) + + err = d.fs.WriteFile(DefaultGrubPath, []byte(defaultGrub), 0755) + if err != nil { + return err + } + + grubInstallArgs := []string{ + "--target=x86_64-efi", + "--efi-directory=/boot/efi", + "--boot-directory=/boot", + "--bootloader-id=" + bootloaderID, + "--removable", + } + + if d.details.RaidEnabled { + grubInstallArgs = append(grubInstallArgs, "--no-nvram") + + out, err := 
d.exec.Execute(ctx, &exec.Params{ + Name: "mdadm", + Args: []string{"--examine", "--scan"}, + Timeout: 10 * time.Second, + }) + if err != nil { + return err + } + + out += "\nMAILADDR root\n" + + err = afero.WriteFile(d.fs, "/etc/mdadm.conf", []byte(out), 0700) + if err != nil { + return err + } + + err = d.fs.MkdirAll("/var/lib/initramfs-tools", 0755) + if err != nil { + return err + } + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "update-initramfs", + Args: []string{"-u"}, + }) + if err != nil { + return err + } + + out, err = d.exec.Execute(ctx, &exec.Params{ + Name: "blkid", + }) + if err != nil { + return err + } + + for line := range strings.SplitSeq(string(out), "\n") { + if strings.Contains(line, `PARTLABEL="efi"`) { + disk, _, found := strings.Cut(line, ":") + if !found { + return fmt.Errorf("unable to process blkid output lines") + } + + shim := fmt.Sprintf(`\\EFI\\%s\\grubx64.efi`, bootloaderID) + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "efibootmgr", + Args: []string{"-c", "-d", disk, "-p1", "-l", shim, "-L", bootloaderID}, + }) + if err != nil { + return err + } + } + } + } + + if !runFromCI() { + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "grub-install", + Args: grubInstallArgs, + }) + if err != nil { + return err + } + } + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "update-grub2", + }) + if err != nil { + return err + } + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "dpkg-reconfigure", + Args: []string{"grub-efi-amd64-bin"}, + Env: []string{ + "DEBCONF_NONINTERACTIVE_SEEN=true", + "DEBIAN_FRONTEND=noninteractive", + }, + }) + if err != nil { + return err + } + + return nil +} + +func (d *CommonTasks) FigureOutSerialSpeed() (serialPort, serialSpeed string, err error) { + // ttyS1,115200n8 + serialPort, serialSpeed, found := strings.Cut(d.details.Console, ",") + if !found { + return "", "", fmt.Errorf("serial console could not be split into port and speed") + } + + _, serialPort, found = 
strings.Cut(serialPort, "ttyS") + if !found { + return "", "", fmt.Errorf("serial port could not be split") + } + + serialSpeed, _, found = strings.Cut(serialSpeed, "n8") + if !found { + return "", "", fmt.Errorf("serial speed could not be split") + } + + return +} diff --git a/pkg/installer/os/common/oscommon.go b/pkg/installer/os/common/oscommon.go new file mode 100644 index 0000000..a796ce3 --- /dev/null +++ b/pkg/installer/os/common/oscommon.go @@ -0,0 +1,210 @@ +package oscommon + +import ( + "context" + "fmt" + "log/slog" + "os" + "os/user" + "path" + "strconv" + "strings" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/spf13/afero" +) + +type ( + OperatingSystem interface { + WriteHostname(ctx context.Context) error + WriteHosts(ctx context.Context) error + WriteResolvConf(ctx context.Context) error + WriteNTPConf(ctx context.Context) error + CreateMetalUser(ctx context.Context) error + ConfigureNetwork(ctx context.Context) error + CopySSHKeys(ctx context.Context) error + FixPermissions(ctx context.Context) error + ProcessUserdata(ctx context.Context) error + BuildCMDLine(ctx context.Context) (string, error) + WriteBootInfo(ctx context.Context, cmdLine string) error + GrubInstall(ctx context.Context, cmdLine string) error + UnsetMachineID(ctx context.Context) error + SystemdServices(ctx context.Context) error + WriteBuildMeta(ctx context.Context) error + + SudoGroup() string + InitramdiskFormatString() string + BootloaderID() string + } + + Config struct { + Log *slog.Logger + Fs *afero.Afero + Exec *exec.CmdExecutor + MachineDetails *v1.MachineDetails + Allocation *apiv2.MachineAllocation + LookupUserFn LookupUserFn + + // customization options from installer config + Name *string + BootloaderID *string + } + + CommonTasks struct { + log *slog.Logger + fs *afero.Afero + details 
*v1.MachineDetails + allocation *apiv2.MachineAllocation + exec *exec.CmdExecutor + network *network.Network + lookupUserFn LookupUserFn + } +) + +func New(cfg *Config) *CommonTasks { + lookupUserFn := user.Lookup + if cfg.LookupUserFn != nil { + lookupUserFn = cfg.LookupUserFn + } + + return &CommonTasks{ + log: cfg.Log, + fs: cfg.Fs, + details: cfg.MachineDetails, + allocation: cfg.Allocation, + exec: cfg.Exec, + network: network.New(cfg.Allocation), + lookupUserFn: lookupUserFn, + } +} + +func (d *CommonTasks) SudoGroup() string { + return "sudo" +} + +func (d *CommonTasks) BootloaderID() string { + panic("common tasks do not provide a bootloader id") +} + +func (d *CommonTasks) InitramdiskFormatString() string { + return "initrd.img-%s" +} + +func (d *CommonTasks) GetKernelVersion(initramdiskFormatString string) (string, error) { + kern, _, err := d.KernelAndInitrdPath(initramdiskFormatString) + if err != nil { + return "", err + } + + _, version, found := strings.Cut(kern, "vmlinuz-") + if !found { + return "", fmt.Errorf("unable to determine kernel version from: %s", kern) + } + + return version, nil +} + +func (d *CommonTasks) KernelAndInitrdPath(initramdiskFormatString string) (kern string, initrd string, err error) { + // Debian 10 + // root@1f223b59051bcb12:/boot# ls -l + // total 83500 + // -rw-r--r-- 1 root root 83 Aug 13 15:25 System.map-5.10.0-17-amd64 + // -rw-r--r-- 1 root root 236286 Aug 13 15:25 config-5.10.0-17-amd64 + // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 + // drwxr-xr-x 2 root root 4096 Oct 3 11:21 grub + // -rw-r--r-- 1 root root 34665690 Oct 3 11:22 initrd.img-5.10.0-17-amd64 + // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 + // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 + // -rw-r--r-- 1 root root 6962816 Aug 13 15:25 vmlinuz-5.10.0-17-amd64 + + // Ubuntu 20.04 + // root@568551f94559b121:~# ls -l /boot/ + // total 83500 + // -rw-r--r-- 1 root root 83 Aug 13 15:25 
System.map-5.10.0-17-amd64 + // -rw-r--r-- 1 root root 236286 Aug 13 15:25 config-5.10.0-17-amd64 + // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 + // drwxr-xr-x 2 root root 4096 Oct 3 11:21 grub + // -rw-r--r-- 1 root root 34665690 Oct 3 11:22 initrd.img-5.10.0-17-amd64 + // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 + // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 + // -rw-r--r-- 1 root root 6962816 Aug 13 15:25 vmlinuz-5.10.0-17-amd64 + + // Almalinux 9 + // [root@14231d4e67d28390 ~]# ls -l /boot/ + // total 160420 + // -rw------- 1 root root 8876661 Jan 7 23:19 System.map-5.14.0-503.19.1.el9_5.x86_64 + // -rw-r--r-- 1 root root 93842 Jul 19 2021 config-5.10.51 + // -rw-r--r-- 1 root root 226249 Jan 7 23:19 config-5.14.0-503.19.1.el9_5.x86_64 + // drwx------ 3 root root 4096 Jun 8 2022 efi + // drwx------ 3 root root 4096 Jan 9 08:02 grub2 + // -rw------- 1 root root 97054329 Jan 9 08:04 initramfs-5.14.0-503.19.1.el9_5.x86_64.img + // drwxr-xr-x 3 root root 4096 Jan 9 08:02 loader + // lrwxrwxrwx 1 root root 52 Jan 9 08:03 symvers-5.14.0-503.19.1.el9_5.x86_64.gz -> /lib/modules/5.14.0-503.19.1.el9_5.x86_64/symvers.gz + // lrwxrwxrwx 1 root root 21 Jul 19 2021 vmlinux -> /boot/vmlinux-5.10.51 + // -rwxr-xr-x 1 root root 43526368 Jul 19 2021 vmlinux-5.10.51 + // -rwxr-xr-x 1 root root 14467384 Jan 7 23:19 vmlinuz-5.14.0-503.19.1.el9_5.x86_64 + + var ( + bootPartition = "/boot" + systemMapPrefix = "/boot/System.map-" + ) + + systemMaps, err := afero.Glob(d.fs, systemMapPrefix+"*") + if err != nil { + return "", "", fmt.Errorf("unable to find a System.map, probably no kernel installed: %w", err) + } + if len(systemMaps) != 1 { + return "", "", fmt.Errorf("no single System.map found (%v), probably no kernel or more than one kernel installed", systemMaps) + } + + systemMap := systemMaps[0] + _, kernelVersion, found := strings.Cut(systemMap, systemMapPrefix) + if !found { + return "", "", fmt.Errorf("unable to 
detect kernel version in System.map: %q", systemMap)
+	}
+
+	kern = path.Join(bootPartition, "vmlinuz"+"-"+kernelVersion)
+	if !d.fileExists(kern) {
+		return "", "", fmt.Errorf("kernel image %q not found", kern)
+	}
+
+	initrd = path.Join(bootPartition, fmt.Sprintf(initramdiskFormatString, kernelVersion))
+	if !d.fileExists(initrd) {
+		return "", "", fmt.Errorf("ramdisk %q not found", initrd)
+	}
+
+	d.log.Info("detect kernel and initrd", "kernel", kern, "initrd", initrd)
+
+	return
+}
+
+// fileExists reports whether filename refers to an existing regular entry (not a directory).
+func (d *CommonTasks) fileExists(filename string) bool {
+	info, err := d.fs.Stat(filename)
+	if err != nil { // any stat error counts as absent; info is nil on error, so do not dereference it
+		return false
+	}
+	return !info.IsDir()
+}
+
+func runFromCI() bool {
+	ciEnv := os.Getenv("INSTALL_FROM_CI")
+
+	ci, err := strconv.ParseBool(ciEnv)
+	if err != nil {
+		return false
+	}
+
+	return ci
+}
+
+// FileExists reports whether filename refers to an existing regular entry (not a directory).
+func FileExists(fs *afero.Afero, filename string) bool {
+	info, err := fs.Stat(filename)
+	if err != nil { // any stat error counts as absent; info is nil on error, so do not dereference it
+		return false
+	}
+
+	return !info.IsDir()
+}
diff --git a/pkg/installer/os/common/oscommon_test.go b/pkg/installer/os/common/oscommon_test.go
new file mode 100644
index 0000000..b44e21f
--- /dev/null
+++ b/pkg/installer/os/common/oscommon_test.go
@@ -0,0 +1,26 @@
+package oscommon
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/metal-stack/os-installer/pkg/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestHelperProcess(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		return
+	}
+
+	var f test.FakeExecParams
+	err := json.Unmarshal([]byte(os.Args[3]), &f)
+	require.NoError(t, err)
+
+	_, err = fmt.Fprint(os.Stdout, f.Output)
+	require.NoError(t, err)
+
+	os.Exit(f.ExitCode)
+}
diff --git a/pkg/installer/os/common/process_userdata.go b/pkg/installer/os/common/process_userdata.go
new file mode 100644
index 0000000..6521021
--- /dev/null
+++ b/pkg/installer/os/common/process_userdata.go
@@ -0,0 +1,89 @@
+package oscommon
+
+import (
+	"context"
+	"strings"
+
+	
"github.com/metal-stack/os-installer/pkg/exec" + + ignitionConfig "github.com/flatcar/ignition/config/v2_4" +) + +const ( + UserdataPath = "/etc/metal/userdata" + ignitionUserdataPath = "/config.ign" +) + +func (d *CommonTasks) ProcessUserdata(ctx context.Context) error { + if ok := d.fileExists(UserdataPath); !ok { + d.log.Info("no userdata present, not processing userdata", "path", UserdataPath) + return nil + } + + content, err := d.fs.ReadFile(UserdataPath) + if err != nil { + return err + } + + defer func() { + out, err := d.exec.Execute(ctx, &exec.Params{ + Name: "systemctl", + Args: []string{"preset-all"}, + }) + if err != nil { + d.log.Error("error when running systemctl preset-all, continuing anyway", "error", err, "output", string(out)) + } + }() + + if isCloudInitFile(content) { + _, err := d.exec.Execute(ctx, &exec.Params{ + Name: "cloud-init", + Args: []string{"devel", "schema", "--config-file", UserdataPath}, + }) + if err != nil { + d.log.Error("error when running cloud-init userdata, continuing anyway", "error", err) + } + + return nil + } + + err = d.fs.Rename(UserdataPath, ignitionUserdataPath) + if err != nil { + return err + } + + rawConfig, err := d.fs.ReadFile(ignitionUserdataPath) + if err != nil { + return err + } + _, report, err := ignitionConfig.Parse(rawConfig) + if err != nil { + d.log.Error("error when validating ignition userdata, continuing anyway", "error", err) + } + + d.log.Info("executing ignition") + + _, err = d.exec.Execute(ctx, &exec.Params{ + Name: "ignition", + Args: []string{"-oem", "file", "-stage", "files", "-log-to-stdout"}, + Dir: "/", + }) + if err != nil { + // if the user provides userdata that does not work out we still want the machine to start up + d.log.Error("error when running ignition, continuing anyway", "report", report.Entries, "error", err) + } + + return nil +} + +func isCloudInitFile(content []byte) bool { + for i, line := range strings.Split(string(content), "\n") { + if strings.Contains(line, 
"#cloud-config") { + return true + } + if i > 1 { + return false + } + } + return false +} diff --git a/pkg/installer/os/common/systemd_services.go b/pkg/installer/os/common/systemd_services.go new file mode 100644 index 0000000..2169c54 --- /dev/null +++ b/pkg/installer/os/common/systemd_services.go @@ -0,0 +1,11 @@ +package oscommon + +import ( + "context" + + "github.com/metal-stack/os-installer/pkg/services" +) + +func (d *CommonTasks) SystemdServices(ctx context.Context) error { + return services.WriteSystemdServices(ctx, d.log, d.network, d.details.ID) +} diff --git a/pkg/installer/os/common/unset_machine_id.go b/pkg/installer/os/common/unset_machine_id.go new file mode 100644 index 0000000..d3361d5 --- /dev/null +++ b/pkg/installer/os/common/unset_machine_id.go @@ -0,0 +1,25 @@ +package oscommon + +import "context" + +const ( + EtcMachineID = "/etc/machine-id" + DbusMachineID = "/var/lib/dbus/machine-id" +) + +func (d *CommonTasks) UnsetMachineID(ctx context.Context) error { + for _, filePath := range []string{EtcMachineID, DbusMachineID} { + if !d.fileExists(filePath) { + continue + } + + f, err := d.fs.Create(filePath) + if err != nil { + return err + } + + _ = f.Close() + } + + return nil +} diff --git a/pkg/installer/os/common/write_boot_info.go b/pkg/installer/os/common/write_boot_info.go new file mode 100644 index 0000000..dc57c23 --- /dev/null +++ b/pkg/installer/os/common/write_boot_info.go @@ -0,0 +1,28 @@ +package oscommon + +import ( + "context" + "fmt" + + v1 "github.com/metal-stack/os-installer/api/v1" + "go.yaml.in/yaml/v3" +) + +func (d *CommonTasks) WriteBootInfo(ctx context.Context, initramdiskFormatString, bootloaderID, cmdLine string) error { + kern, initrd, err := d.KernelAndInitrdPath(initramdiskFormatString) + if err != nil { + return err + } + + content, err := yaml.Marshal(v1.Bootinfo{ + Initrd: initrd, + Cmdline: cmdLine, + Kernel: kern, + BootloaderID: bootloaderID, + }) + if err != nil { + return fmt.Errorf("unable to write 
boot-info.yaml: %w", err) + } + + return d.fs.WriteFile(v1.BootInfoPath, content, 0700) +} diff --git a/pkg/installer/os/common/write_build_meta.go b/pkg/installer/os/common/write_build_meta.go new file mode 100644 index 0000000..7a75a72 --- /dev/null +++ b/pkg/installer/os/common/write_build_meta.go @@ -0,0 +1,41 @@ +package oscommon + +import ( + "context" + "strings" + + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + "github.com/metal-stack/v" + "go.yaml.in/yaml/v3" +) + +func (d *CommonTasks) WriteBuildMeta(ctx context.Context) error { + d.log.Debug("writing build meta file", "path", v1.BuildMetaPath) + + meta := &v1.BuildMeta{ + Version: v.Version, + Date: v.BuildDate, + SHA: v.GitSHA1, + Revision: v.Revision, + } + + out, err := d.exec.Execute(ctx, &exec.Params{ + Name: "ignition", + Args: []string{"-version"}, + }) + if err != nil { + d.log.Error("error detecting ignition version for build meta, continuing anyway", "error", err) + } else { + meta.IgnitionVersion = strings.TrimSpace(out) + } + + content, err := yaml.Marshal(meta) + if err != nil { + return err + } + + content = append([]byte("---\n"), content...) 
+ + return d.fs.WriteFile(v1.BuildMetaPath, content, 0644) +} diff --git a/pkg/installer/os/common/write_hostname.go b/pkg/installer/os/common/write_hostname.go new file mode 100644 index 0000000..1bcfc70 --- /dev/null +++ b/pkg/installer/os/common/write_hostname.go @@ -0,0 +1,13 @@ +package oscommon + +import ( + "context" +) + +const ( + HostnameFilePath = "/etc/hostname" +) + +func (d *CommonTasks) WriteHostname(ctx context.Context) error { + return d.fs.WriteFile(HostnameFilePath, []byte(d.allocation.Hostname), 0644) +} diff --git a/pkg/installer/os/common/write_hosts.go b/pkg/installer/os/common/write_hosts.go new file mode 100644 index 0000000..fd3ad89 --- /dev/null +++ b/pkg/installer/os/common/write_hosts.go @@ -0,0 +1,23 @@ +package oscommon + +import ( + "context" + "fmt" +) + +const ( + EtcHostsPath = "/etc/hosts" + etcHostsFileContent = `# this file was auto generated by the os-installer +127.0.0.1 localhost +%s %s +` +) + +func (d *CommonTasks) WriteHosts(ctx context.Context) error { + ips, err := d.network.PrivatePrimaryIPs() + if err != nil { + return err + } + + return d.fs.WriteFile(EtcHostsPath, fmt.Appendf(nil, etcHostsFileContent, ips[0], d.allocation.Hostname), 0644) +} diff --git a/pkg/installer/os/common/write_ntp_conf.go b/pkg/installer/os/common/write_ntp_conf.go new file mode 100644 index 0000000..a08898a --- /dev/null +++ b/pkg/installer/os/common/write_ntp_conf.go @@ -0,0 +1,43 @@ +package oscommon + +import ( + "context" + "fmt" + "strings" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" +) + +const ( + TimesyncdConfigPath = "/etc/systemd/timesyncd.conf" +) + +func (d *CommonTasks) WriteNTPConf(ctx context.Context) error { + if d.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + d.log.Info("skipping timesyncd config for firewalls as chrony will be configured later on through systemd service renderer") + return nil + } + + if len(d.allocation.NtpServers) == 0 { + return nil + } + + 
var ntpServers []string + + for _, ntp := range d.allocation.NtpServers { + ntpServers = append(ntpServers, ntp.Address) + } + + return d.WriteNtpConfToPath(TimesyncdConfigPath, ntpServers) +} + +func (d *CommonTasks) WriteNtpConfToPath(configPath string, ntpServers []string) error { + content := fmt.Sprintf("[Time]\nNTP=%s\n", strings.Join(ntpServers, " ")) + + err := d.fs.Remove(configPath) + if err != nil { + d.log.Info("ntp config file not present", "file", configPath) + } + + return d.fs.WriteFile(configPath, []byte(content), 0644) +} diff --git a/pkg/installer/os/common/write_resolv_conf.go b/pkg/installer/os/common/write_resolv_conf.go new file mode 100644 index 0000000..804fd5b --- /dev/null +++ b/pkg/installer/os/common/write_resolv_conf.go @@ -0,0 +1,37 @@ +package oscommon + +import ( + "context" + "strings" + + "github.com/spf13/afero" +) + +const ( + ResolvConfPath = "/etc/resolv.conf" +) + +func (d *CommonTasks) WriteResolvConf(ctx context.Context) error { + d.log.Info("write configuration", "file", ResolvConfPath) + // Must be written here because during docker build this file is synthetic + err := d.fs.Remove(ResolvConfPath) + if err != nil { + d.log.Info("config file not present", "file", ResolvConfPath) + } + + content := []byte( + `nameserver 8.8.8.8 +nameserver 8.8.4.4 +`) + + if len(d.allocation.DnsServers) > 0 { + var s strings.Builder + for _, dnsServer := range d.allocation.DnsServers { + s.WriteString("nameserver " + dnsServer.Ip + "\n") + } + + content = []byte(s.String()) + } + + return afero.WriteFile(d.fs, ResolvConfPath, content, 0644) +} diff --git a/pkg/installer/os/debian/debian.go b/pkg/installer/os/debian/debian.go new file mode 100644 index 0000000..480f2b8 --- /dev/null +++ b/pkg/installer/os/debian/debian.go @@ -0,0 +1,35 @@ +package debian + +import ( + "context" + + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" +) + +type ( + Os struct { + *oscommon.CommonTasks + } +) + +func New(cfg *oscommon.Config) 
*Os { + return &Os{ + CommonTasks: oscommon.New(cfg), + } +} + +func (o *Os) BootloaderID() string { + return "metal-debian" +} + +func (o *Os) WriteBootInfo(ctx context.Context, cmdLine string) error { + return o.CommonTasks.WriteBootInfo(ctx, o.InitramdiskFormatString(), o.BootloaderID(), cmdLine) +} + +func (o *Os) CreateMetalUser(ctx context.Context) error { + return o.CommonTasks.CreateMetalUser(ctx, o.SudoGroup()) +} + +func (o *Os) GrubInstall(ctx context.Context, cmdLine string) error { + return o.CommonTasks.GrubInstall(ctx, o.BootloaderID(), cmdLine) +} diff --git a/pkg/installer/os/debian/tests/debian_test.go b/pkg/installer/os/debian/tests/debian_test.go new file mode 100644 index 0000000..6cbdf33 --- /dev/null +++ b/pkg/installer/os/debian/tests/debian_test.go @@ -0,0 +1,26 @@ +package debian_test + +import ( + "encoding/json" + "fmt" + goos "os" + "testing" + + "github.com/metal-stack/os-installer/pkg/test" + "github.com/stretchr/testify/require" +) + +func TestHelperProcess(t *testing.T) { + if goos.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + var f test.FakeExecParams + err := json.Unmarshal([]byte(goos.Args[3]), &f) + require.NoError(t, err) + + _, err = fmt.Fprint(goos.Stdout, f.Output) + require.NoError(t, err) + + goos.Exit(f.ExitCode) +} diff --git a/pkg/installer/os/debian/tests/doc.go b/pkg/installer/os/debian/tests/doc.go new file mode 100644 index 0000000..b77ea8e --- /dev/null +++ b/pkg/installer/os/debian/tests/doc.go @@ -0,0 +1,3 @@ +package debian + +// as most of the implementation is shared, it's sufficient to put all tests in the ubuntu os implementation and just test the differences here diff --git a/pkg/installer/os/debian/tests/install_bootloader_test.go b/pkg/installer/os/debian/tests/install_bootloader_test.go new file mode 100644 index 0000000..6d5d4a5 --- /dev/null +++ b/pkg/installer/os/debian/tests/install_bootloader_test.go @@ -0,0 +1,166 @@ +package debian_test + +import ( + "log/slog" + "testing" + + 
"github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/debian" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + sampleMdadmScanOutput = `ARRAY /dev/md/0 metadata=1.0 UUID=42d10089:ee1e0399:445e7550:62b63ec8 name=any:0 +ARRAY /dev/md/1 metadata=1.0 UUID=543eb7f8:98d4d986:e669824d:bebe69e5 name=any:1 +ARRAY /dev/md/2 metadata=1.0 UUID=fc32a6f0:ee40d9db:87c8c9f3:a8400c8b name=any:2` + + sampleBlkidOutput = `/dev/sda1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="cc57c456-0b2f-6345-c597-d861cc6dd8ac" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="273985c8-d097-4123-bcd0-80b4e4e14728" +/dev/sda2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="54748c60-b566-f391-142c-fb78bb1fc6a9" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="d7863f4e-af7c-47fc-8c03-6ecdc69bc72d" +/dev/sda3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="582e9b4f-f191-e01e-85fd-2f7d969fbef6" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="e8b44f09-b7f7-4e0d-a7c3-d909617d1f05" +/dev/sdb1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="61bd5d8b-1bb8-673b-9e61-8c28dccc3812" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="13a4c568-57b0-4259-9927-9ac023aaa5f0" +/dev/sdb2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="e7d01e93-9340-5b90-68f8-d8f815595132" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="ab11cd86-37b8-4bae-81e5-21fe0a9c9ae0" +/dev/sdb3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="764217ad-1591-a83a-c799-23397f968729" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="9afbf9c1-b2ba-4b46-8db1-e802d26c93b6" +/dev/md1: 
LABEL="root" UUID="ace079b5-06be-4429-bbf0-081ea4d7d0d9" TYPE="ext4" +/dev/md0: LABEL="efi" UUID="C236-297F" TYPE="vfat" +/dev/md2: LABEL="varlib" UUID="385e8e8e-dbfd-481e-93a4-cba7f4d5fa02" TYPE="ext4"` +) + +func Test_os_GrubInstall(t *testing.T) { + tests := []struct { + name string + cmdLine string + details *v1.MachineDetails + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "without raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + Console: "ttyS1,115200n8", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-debian", "--removable"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"update-grub2"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=metal-debian +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +`, + }, + { + name: "with raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + RaidEnabled: true, + RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9", + Console: "ttyS1,115200n8", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"mdadm", "--examine", "--scan"}, + Output: sampleMdadmScanOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"update-initramfs", "-u"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: 
[]string{"blkid"}, + Output: sampleBlkidOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sda1", "-p1", "-l", "\\\\EFI\\\\metal-debian\\\\grubx64.efi", "-L", "metal-debian"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sdb1", "-p1", "-l", "\\\\EFI\\\\metal-debian\\\\grubx64.efi", "-L", "metal-debian"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-debian", "--removable", "--no-nvram"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"update-grub2"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=metal-debian +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + d := debian.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + gotErr := d.GrubInstall(t.Context(), tt.cmdLine) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.DefaultGrubPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/debian/tests/write_boot_info_test.go b/pkg/installer/os/debian/tests/write_boot_info_test.go new file 
mode 100644 index 0000000..80b55ba --- /dev/null +++ b/pkg/installer/os/debian/tests/write_boot_info_test.go @@ -0,0 +1,124 @@ +package debian_test + +import ( + "fmt" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/debian" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v3" +) + +func Test_os_WriteBootInfo(t *testing.T) { + tests := []struct { + name string + cmdLine string + fsMocks func(fs *afero.Afero) + want *v1.Bootinfo + wantErr error + }{ + { + name: "boot-info debian", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: &v1.Bootinfo{ + Initrd: "/boot/initrd.img-1.2.3", + Cmdline: "a-cmd-line", + Kernel: "/boot/vmlinuz-1.2.3", + BootloaderID: "metal-debian", + }, + }, + { + name: "more than one system.map present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.4", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("no single System.map found ([/boot/System.map-1.2.3 /boot/System.map-1.2.4]), probably no kernel or more than one kernel installed"), + }, + { + name: "no system.map present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, 
fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("no single System.map found ([]), probably no kernel or more than one kernel installed"), + }, + { + name: "no vmlinuz present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("kernel image \"/boot/vmlinuz-1.2.3\" not found"), + }, + { + name: "no ramdisk present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("ramdisk \"/boot/initrd.img-1.2.3\" not found"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := debian.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteBootInfo(t.Context(), tt.cmdLine) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(v1.BootInfoPath) + require.NoError(t, err) + + var bootInfo v1.Bootinfo + err = yaml.Unmarshal(content, &bootInfo) + require.NoError(t, err) + + assert.Equal(t, tt.want, &bootInfo) + }) + } +} diff --git a/pkg/installer/os/os.go b/pkg/installer/os/os.go new file mode 100644 index 0000000..86f05fb --- /dev/null +++ b/pkg/installer/os/os.go @@ -0,0 +1,110 @@ +package operatingsystem + +import ( + "fmt" + "strconv" + "strings" + + "github.com/metal-stack/os-installer/pkg/exec" + 
"github.com/metal-stack/os-installer/pkg/installer/os/almalinux" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/debian" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/spf13/afero" +) + +const ( + OsReleasePath = "/etc/os-release" + + ubuntuOS = osName("ubuntu") + debianOS = osName("debian") + almalinuxOS = osName("almalinux") + // defaultOS contains no specific overwrites and can be used by out-of-tree images + defaultOS = osName("default") +) + +type ( + osName string +) + +func New(cfg *oscommon.Config) (oscommon.OperatingSystem, error) { + if cfg.Log == nil { + return nil, fmt.Errorf("log must be passed to os-installer") + } + if cfg.Allocation == nil { + return nil, fmt.Errorf("allocation must be passed to os-installer") + } + if cfg.MachineDetails == nil { + return nil, fmt.Errorf("machine details must be passed to os-installer") + } + + if cfg.Fs == nil { + cfg.Fs = &afero.Afero{ + Fs: afero.OsFs{}, + } + } + + if cfg.Exec == nil { + cfg.Exec = exec.New(cfg.Log) + } + + if cfg.Name != nil { + return fromOsName(*cfg.Name, cfg) + } + + os, err := detectOS(cfg) + if err != nil { + cfg.Log.Error("unable to detect operating system, falling back to default implementation", "error", err) + return fromOsName(string(defaultOS), cfg) + } + + return os, nil +} + +func detectOS(cfg *oscommon.Config) (oscommon.OperatingSystem, error) { + cfg.Log.Debug("automatically detecting operating system for installation") + + content, err := cfg.Fs.ReadFile(OsReleasePath) + if err != nil { + return nil, err + } + + env := map[string]string{} + for line := range strings.SplitSeq(string(content), "\n") { + k, v, found := strings.Cut(line, "=") + if found { + env[k] = v + } + } + + if os, ok := env["ID"]; ok { + unquoted, err := strconv.Unquote(os) + if err == nil { + os = unquoted + } + + return fromOsName(os, cfg) + } + + return nil, fmt.Errorf("unable to detect os, no ID 
field found /etc/os-release") +} + +func fromOsName(name string, cfg *oscommon.Config) (oscommon.OperatingSystem, error) { + switch os := osName(strings.ToLower(name)); os { + case ubuntuOS: + cfg.Log.Info("using ubuntu os-installer") + return ubuntu.New(cfg), nil + case debianOS: + cfg.Log.Info("using debian os-installer") + return debian.New(cfg), nil + case almalinuxOS: + cfg.Log.Info("using almalinux os-installer") + return almalinux.New(cfg), nil + default: + if cfg.Name != nil { + return nil, fmt.Errorf("os with name %q is not supported", os) + } + cfg.Log.Info("using default os-installer implementation") + return oscommon.NewDefaultOS(cfg), nil + } +} diff --git a/pkg/installer/os/os_test.go b/pkg/installer/os/os_test.go new file mode 100644 index 0000000..3bc3357 --- /dev/null +++ b/pkg/installer/os/os_test.go @@ -0,0 +1,170 @@ +package operatingsystem_test + +import ( + "fmt" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + operatingsystem "github.com/metal-stack/os-installer/pkg/installer/os" + "github.com/metal-stack/os-installer/pkg/installer/os/almalinux" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/debian" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + ubuntuRelease = `PRETTY_NAME="Ubuntu 24.04.4 LTS" +NAME="Ubuntu" +VERSION_ID="24.04" +VERSION="24.04.4 LTS (Noble Numbat)" +VERSION_CODENAME=noble +ID=ubuntu +ID_LIKE=debian +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 
+PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" +UBUNTU_CODENAME=noble +LOGO=ubuntu-logo +` + debianRelease = `PRETTY_NAME="Debian GNU/Linux 13 (trixie)" +NAME="Debian GNU/Linux" +VERSION_ID="13" +VERSION="13 (trixie)" +VERSION_CODENAME=trixie +DEBIAN_VERSION_FULL=13.4 +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" +` + almalinuxRelease = `NAME="AlmaLinux" +VERSION="10.1 (Heliotrope Lion)" +ID="almalinux" +ID_LIKE="rhel centos fedora" +VERSION_ID="10.1" +PLATFORM_ID="platform:el10" +PRETTY_NAME="AlmaLinux 10.1 (Heliotrope Lion)" +ANSI_COLOR="0;34" +LOGO="fedora-logo-icon" +CPE_NAME="cpe:/o:almalinux:almalinux:10.1" +HOME_URL="https://almalinux.org/" +DOCUMENTATION_URL="https://wiki.almalinux.org/" +VENDOR_NAME="AlmaLinux" +VENDOR_URL="https://almalinux.org/" +BUG_REPORT_URL="https://bugs.almalinux.org/" + +ALMALINUX_MANTISBT_PROJECT="AlmaLinux-10" +ALMALINUX_MANTISBT_PROJECT_VERSION="10.1" +REDHAT_SUPPORT_PRODUCT="AlmaLinux" +REDHAT_SUPPORT_PRODUCT_VERSION="10.1" +SUPPORT_END=2035-06-01 +` + unknownRelease = `NAME="EndeavourOS" +PRETTY_NAME="EndeavourOS" +ID="endeavouros" +ID_LIKE="arch" +BUILD_ID="2025.03.19" +ANSI_COLOR="38;2;23;147;209" +HOME_URL="https://endeavouros.com" +DOCUMENTATION_URL="https://discovery.endeavouros.com" +SUPPORT_URL="https://forum.endeavouros.com" +BUG_REPORT_URL="https://forum.endeavouros.com/c/general-system/endeavouros-installation" +PRIVACY_POLICY_URL="https://endeavouros.com/privacy-policy-2" +LOGO="endeavouros"` +) + +func Test_New(t *testing.T) { + tests := []struct { + name string + explicitOS *string + fsMocks func(fs *afero.Afero) + want any + wantErr error + }{ + { + name: "detect ubuntu", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(ubuntuRelease), 0777)) + }, + want: &ubuntu.Os{}, + }, + { + name: "detect debian", + fsMocks: func(fs 
*afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(debianRelease), 0777)) + }, + want: &debian.Os{}, + }, + { + name: "detect almalinux", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(almalinuxRelease), 0777)) + }, + want: &almalinux.Os{}, + }, + { + name: "detect default for unknown", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(unknownRelease), 0777)) + }, + want: &oscommon.DefaultOS{}, + }, + { + name: "explicitly want almalinux impl on unknown os", + explicitOS: new("almalinux"), + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(unknownRelease), 0777)) + }, + want: &almalinux.Os{}, + }, + { + name: "explicitly want unsupported", + explicitOS: new("foo"), + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, operatingsystem.OsReleasePath, []byte(unknownRelease), 0777)) + }, + wantErr: fmt.Errorf(`os with name "foo" is not supported`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + os, gotErr := operatingsystem.New(&oscommon.Config{ + Log: log, + Name: tt.explicitOS, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + MachineDetails: &v1.MachineDetails{}, + Allocation: &apiv2.MachineAllocation{}, + }) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + assert.IsType(t, tt.want, os) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/cmd_line_test.go b/pkg/installer/os/ubuntu/tests/cmd_line_test.go new file mode 100644 index 0000000..693a273 --- /dev/null +++ 
b/pkg/installer/os/ubuntu/tests/cmd_line_test.go @@ -0,0 +1,97 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" +) + +const ( + sampleMdadmDetailOutput = `MD_LEVEL=raid1 +MD_DEVICES=2 +MD_METADATA=1.0 +MD_UUID=543eb7f8:98d4d986:e669824d:bebe69e5 +MD_DEVNAME=1 +MD_NAME=any:1 +MD_DEVICE_dev_sdb2_ROLE=1 +MD_DEVICE_dev_sdb2_DEV=/dev/sdb2 +MD_DEVICE_dev_sda2_ROLE=0 +MD_DEVICE_dev_sda2_DEV=/dev/sda2` +) + +func Test_os_BuildCMDLine(t *testing.T) { + tests := []struct { + name string + details *v1.MachineDetails + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "no raid", + details: &v1.MachineDetails{ + RootUUID: "543eb7f8-98d4-d986-e669-824dbebe69e5", + RaidEnabled: false, + Console: "ttyS1,115200n8", + }, + want: "console=ttyS1,115200n8 root=UUID=543eb7f8-98d4-d986-e669-824dbebe69e5 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + wantErr: nil, + }, + { + name: "with raid", + details: &v1.MachineDetails{ + RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9", + RaidEnabled: true, + Console: "ttyS1,115200n8", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"blkid"}, + Output: sampleBlkidOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"mdadm", "--detail", "--export", "/dev/md1"}, + Output: sampleMdadmDetailOutput, + ExitCode: 0, + }, + }, + want: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300 rdloaddriver=raid0 rdloaddriver=raid1 rd.md.uuid=543eb7f8:98d4d986:e669824d:bebe69e5", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + log := slog.Default() + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: &afero.Afero{ + Fs: afero.NewMemMapFs(), + }, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + got, gotErr := d.BuildCMDLine(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("diff (+got -want):\n %s", diff) + } + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/copy_ssh_keys_test.go b/pkg/installer/os/ubuntu/tests/copy_ssh_keys_test.go new file mode 100644 index 0000000..e922924 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/copy_ssh_keys_test.go @@ -0,0 +1,74 @@ +package ubuntu_test + +import ( + "log/slog" + "os/user" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_CopySSHKeys(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + lookupUserFn oscommon.LookupUserFn + wantErr error + }{ + { + name: "copy ssh keys", + lookupUserFn: func(name string) (*user.User, error) { + return &user.User{ + Uid: "1000", + Gid: "1000", + Username: oscommon.MetalUser, + Name: oscommon.MetalUser, + HomeDir: "/home/metal", + }, nil + }, + allocation: &apiv2.MachineAllocation{ + SshPublicKeys: []string{"a", "b"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + 
+ d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + LookupUserFn: tt.lookupUserFn, + Allocation: tt.allocation, + }) + + gotErr := d.CopySSHKeys(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile("/home/metal/.ssh/authorized_keys") + require.NoError(t, err) + + assert.Equal(t, "a\nb", string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/create_metal_user_test.go b/pkg/installer/os/ubuntu/tests/create_metal_user_test.go new file mode 100644 index 0000000..45af7b1 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/create_metal_user_test.go @@ -0,0 +1,107 @@ +package ubuntu_test + +import ( + "log/slog" + "os/user" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" +) + +func Test_os_CreateMetalUser(t *testing.T) { + tests := []struct { + name string + details *v1.MachineDetails + execMocks []test.FakeExecParams + lookupUserFn oscommon.LookupUserFn + want string + wantErr error + }{ + { + name: "create user already exists", + details: &v1.MachineDetails{ + Password: "abc", + }, + lookupUserFn: func(name string) (*user.User, error) { + return &user.User{ + Uid: "1000", + Gid: "1000", + Username: oscommon.MetalUser, + Name: oscommon.MetalUser, + HomeDir: "/home/metal", + }, nil + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"userdel", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"useradd", "--create-home", "--uid", "1000", "--gid", "sudo", "--shell", "/bin/bash", 
oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"passwd", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + }, + }, + { + name: "create user does not yet exist", + details: &v1.MachineDetails{ + Password: "abc", + }, + lookupUserFn: func(name string) (*user.User, error) { + return nil, user.UnknownUserError(oscommon.MetalUser) + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"useradd", "--create-home", "--uid", "1000", "--gid", "sudo", "--shell", "/bin/bash", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"passwd", oscommon.MetalUser}, + Output: "", + ExitCode: 0, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + LookupUserFn: tt.lookupUserFn, + }) + + gotErr := d.CreateMetalUser(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/fix_permissions_test.go b/pkg/installer/os/ubuntu/tests/fix_permissions_test.go new file mode 100644 index 0000000..f2d3bee --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/fix_permissions_test.go @@ -0,0 +1,65 @@ +package ubuntu_test + +import ( + iofs "io/fs" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func 
Test_os_FixPermissions(t *testing.T) { + tests := []struct { + name string + fsMocks func(fs afero.Fs) + wantErr error + }{ + { + name: "fix permissions", + fsMocks: func(fs afero.Fs) { + require.NoError(t, fs.MkdirAll("/var/tmp", 0000)) + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + err := d.FixPermissions(t.Context()) + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n %s", diff) + } + + info, err := fs.Stat("/var/tmp") + require.NoError(t, err) + assert.Equal(t, iofs.FileMode(01777).Perm(), info.Mode().Perm()) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/install_bootloader_test.go b/pkg/installer/os/ubuntu/tests/install_bootloader_test.go new file mode 100644 index 0000000..b0a72ee --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/install_bootloader_test.go @@ -0,0 +1,166 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + sampleMdadmScanOutput = `ARRAY /dev/md/0 metadata=1.0 UUID=42d10089:ee1e0399:445e7550:62b63ec8 name=any:0 +ARRAY /dev/md/1 metadata=1.0 UUID=543eb7f8:98d4d986:e669824d:bebe69e5 name=any:1 +ARRAY /dev/md/2 metadata=1.0 UUID=fc32a6f0:ee40d9db:87c8c9f3:a8400c8b name=any:2` + + 
sampleBlkidOutput = `/dev/sda1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="cc57c456-0b2f-6345-c597-d861cc6dd8ac" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="273985c8-d097-4123-bcd0-80b4e4e14728" +/dev/sda2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="54748c60-b566-f391-142c-fb78bb1fc6a9" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="d7863f4e-af7c-47fc-8c03-6ecdc69bc72d" +/dev/sda3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="582e9b4f-f191-e01e-85fd-2f7d969fbef6" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="e8b44f09-b7f7-4e0d-a7c3-d909617d1f05" +/dev/sdb1: UUID="42d10089-ee1e-0399-445e-755062b63ec8" UUID_SUB="61bd5d8b-1bb8-673b-9e61-8c28dccc3812" LABEL="any:0" TYPE="linux_raid_member" PARTLABEL="efi" PARTUUID="13a4c568-57b0-4259-9927-9ac023aaa5f0" +/dev/sdb2: UUID="543eb7f8-98d4-d986-e669-824dbebe69e5" UUID_SUB="e7d01e93-9340-5b90-68f8-d8f815595132" LABEL="any:1" TYPE="linux_raid_member" PARTLABEL="root" PARTUUID="ab11cd86-37b8-4bae-81e5-21fe0a9c9ae0" +/dev/sdb3: UUID="fc32a6f0-ee40-d9db-87c8-c9f3a8400c8b" UUID_SUB="764217ad-1591-a83a-c799-23397f968729" LABEL="any:2" TYPE="linux_raid_member" PARTLABEL="varlib" PARTUUID="9afbf9c1-b2ba-4b46-8db1-e802d26c93b6" +/dev/md1: LABEL="root" UUID="ace079b5-06be-4429-bbf0-081ea4d7d0d9" TYPE="ext4" +/dev/md0: LABEL="efi" UUID="C236-297F" TYPE="vfat" +/dev/md2: LABEL="varlib" UUID="385e8e8e-dbfd-481e-93a4-cba7f4d5fa02" TYPE="ext4"` +) + +func Test_os_GrubInstall(t *testing.T) { + tests := []struct { + name string + cmdLine string + details *v1.MachineDetails + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "without raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + Console: "ttyS1,115200n8", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: 
[]string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-ubuntu", "--removable"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"update-grub2"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=metal-ubuntu +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +`, + }, + { + name: "with raid", + cmdLine: "console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300", + details: &v1.MachineDetails{ + RaidEnabled: true, + RootUUID: "ace079b5-06be-4429-bbf0-081ea4d7d0d9", + Console: "ttyS1,115200n8", + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"mdadm", "--examine", "--scan"}, + Output: sampleMdadmScanOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"update-initramfs", "-u"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"blkid"}, + Output: sampleBlkidOutput, + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sda1", "-p1", "-l", "\\\\EFI\\\\metal-ubuntu\\\\grubx64.efi", "-L", "metal-ubuntu"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"efibootmgr", "-c", "-d", "/dev/sdb1", "-p1", "-l", "\\\\EFI\\\\metal-ubuntu\\\\grubx64.efi", "-L", "metal-ubuntu"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"grub-install", "--target=x86_64-efi", "--efi-directory=/boot/efi", "--boot-directory=/boot", "--bootloader-id=metal-ubuntu", "--removable", "--no-nvram"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"update-grub2"}, + Output: "", + ExitCode: 0, + }, + { + 
WantCmd: []string{"dpkg-reconfigure", "grub-efi-amd64-bin"}, + Output: "", + ExitCode: 0, + }, + }, + want: `GRUB_DEFAULT=0 +GRUB_TIMEOUT=5 +GRUB_DISTRIBUTOR=metal-ubuntu +GRUB_CMDLINE_LINUX_DEFAULT="" +GRUB_CMDLINE_LINUX="console=ttyS1,115200n8 root=UUID=ace079b5-06be-4429-bbf0-081ea4d7d0d9 init=/sbin/init net.ifnames=0 biosdevname=0 nvme_core.io_timeout=300" +GRUB_TERMINAL=serial +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=1 --word=8" +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + gotErr := d.GrubInstall(t.Context(), tt.cmdLine) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.DefaultGrubPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/process_userdata_test.go b/pkg/installer/os/ubuntu/tests/process_userdata_test.go new file mode 100644 index 0000000..14c2795 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/process_userdata_test.go @@ -0,0 +1,107 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" +) + +const ( + sampleCloudInit = `#cloud-config +# Add groups to the system +# The following example adds the ubuntu group with members 
'root' and 'sys' +# and the empty group cloud-users. +groups: + - admingroup: [root,sys] + - cloud-users` + sampleIgnition = `{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"}}` +) + +func Test_os_ProcessUserdata(t *testing.T) { + tests := []struct { + name string + details *v1.MachineDetails + fsMocks func(fs *afero.Afero) + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "no userdata given", + }, + { + name: "cloud-init", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, oscommon.UserdataPath, []byte(sampleCloudInit), 0700)) + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"cloud-init", "devel", "schema", "--config-file", oscommon.UserdataPath}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"systemctl", "preset-all"}, + Output: "", + ExitCode: 0, + }, + }, + }, + { + name: "ignition", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, afero.WriteFile(fs, oscommon.UserdataPath, []byte(sampleIgnition), 0700)) + }, + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"ignition", "-oem", "file", "-stage", "files", "-log-to-stdout"}, + Output: "", + ExitCode: 0, + }, + { + WantCmd: []string{"systemctl", "preset-all"}, + Output: "", + ExitCode: 0, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + MachineDetails: tt.details, + }) + + gotErr := d.ProcessUserdata(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/ubuntu_test.go 
b/pkg/installer/os/ubuntu/tests/ubuntu_test.go new file mode 100644 index 0000000..7b56718 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/ubuntu_test.go @@ -0,0 +1,26 @@ +package ubuntu_test + +import ( + "encoding/json" + "fmt" + goos "os" + "testing" + + "github.com/metal-stack/os-installer/pkg/test" + "github.com/stretchr/testify/require" +) + +func TestHelperProcess(t *testing.T) { + if goos.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + var f test.FakeExecParams + err := json.Unmarshal([]byte(goos.Args[3]), &f) + require.NoError(t, err) + + _, err = fmt.Fprint(goos.Stdout, f.Output) + require.NoError(t, err) + + goos.Exit(f.ExitCode) +} diff --git a/pkg/installer/os/ubuntu/tests/unset_machine_id_test.go b/pkg/installer/os/ubuntu/tests/unset_machine_id_test.go new file mode 100644 index 0000000..dcf6078 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/unset_machine_id_test.go @@ -0,0 +1,76 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_UnsetMachineID(t *testing.T) { + tests := []struct { + name string + fsMocks func(fs *afero.Afero) + wantErr error + }{ + { + name: "unset", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/etc/machine-id", []byte("uuid"), 0700)) + require.NoError(t, fs.WriteFile("/var/lib/dbus/machine-id", []byte("uuid"), 0700)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: 
exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + content, err := fs.ReadFile(oscommon.EtcMachineID) + require.NoError(t, err) + require.Equal(t, "uuid", string(content)) + + content, err = fs.ReadFile(oscommon.DbusMachineID) + require.NoError(t, err) + require.Equal(t, "uuid", string(content)) + + gotErr := d.UnsetMachineID(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err = fs.ReadFile(oscommon.EtcMachineID) + require.NoError(t, err) + assert.Empty(t, content) + + content, err = fs.ReadFile(oscommon.DbusMachineID) + require.NoError(t, err) + assert.Empty(t, content) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_boot_info_test.go b/pkg/installer/os/ubuntu/tests/write_boot_info_test.go new file mode 100644 index 0000000..f3d313a --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/write_boot_info_test.go @@ -0,0 +1,124 @@ +package ubuntu_test + +import ( + "fmt" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v3" +) + +func Test_os_WriteBootInfo(t *testing.T) { + tests := []struct { + name string + cmdLine string + fsMocks func(fs *afero.Afero) + want *v1.Bootinfo + wantErr error + }{ + { + name: "boot-info ubuntu", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 
0700)) + }, + want: &v1.Bootinfo{ + Initrd: "/boot/initrd.img-1.2.3", + Cmdline: "a-cmd-line", + Kernel: "/boot/vmlinuz-1.2.3", + BootloaderID: "metal-ubuntu", + }, + }, + { + name: "more than one system.map present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.4", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("no single System.map found ([/boot/System.map-1.2.3 /boot/System.map-1.2.4]), probably no kernel or more than one kernel installed"), + }, + { + name: "no system.map present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("no single System.map found ([]), probably no kernel or more than one kernel installed"), + }, + { + name: "no vmlinuz present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/initrd.img-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("kernel image \"/boot/vmlinuz-1.2.3\" not found"), + }, + { + name: "no ramdisk present", + cmdLine: "a-cmd-line", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile("/boot/System.map-1.2.3", nil, 0700)) + require.NoError(t, fs.WriteFile("/boot/vmlinuz-1.2.3", nil, 0700)) + }, + want: nil, + wantErr: fmt.Errorf("ramdisk \"/boot/initrd.img-1.2.3\" not found"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := 
ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteBootInfo(t.Context(), tt.cmdLine) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(v1.BootInfoPath) + require.NoError(t, err) + + var bootInfo v1.Bootinfo + err = yaml.Unmarshal(content, &bootInfo) + require.NoError(t, err) + + assert.Equal(t, tt.want, &bootInfo) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_build_meta_test.go b/pkg/installer/os/ubuntu/tests/write_build_meta_test.go new file mode 100644 index 0000000..2a1b308 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/write_build_meta_test.go @@ -0,0 +1,81 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + v1 "github.com/metal-stack/os-installer/api/v1" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/metal-stack/v" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteBuildMeta(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + execMocks []test.FakeExecParams + want string + wantErr error + }{ + { + name: "build meta gets written", + execMocks: []test.FakeExecParams{ + { + WantCmd: []string{"ignition", "-version"}, + Output: "Ignition v0.36.2", + ExitCode: 0, + }, + }, + want: `--- +buildVersion: "456" +buildDate: "" +buildSHA: abc +buildRevision: revision +ignitionVersion: Ignition v0.36.2 +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = 
slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t, tt.execMocks...)), + }) + + v.Version = "456" + v.GitSHA1 = "abc" + v.Revision = "revision" + + gotErr := d.WriteBuildMeta(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(v1.BuildMetaPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_hostname_test.go b/pkg/installer/os/ubuntu/tests/write_hostname_test.go new file mode 100644 index 0000000..55b75b3 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/write_hostname_test.go @@ -0,0 +1,82 @@ +package ubuntu_test + +import ( + "log/slog" + goos "os" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteHostname(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + fsMocks func(fs *afero.Afero) + want string + wantErr error + }{ + { + name: "write hostname", + allocation: &apiv2.MachineAllocation{ + Hostname: "test-hostname", + }, + want: "test-hostname", + wantErr: nil, + }, + { + name: "overwrite when already exists", + allocation: &apiv2.MachineAllocation{ + Hostname: "test-hostname", + }, + want: "test-hostname", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(oscommon.HostnameFilePath, 
[]byte("bar"), goos.ModePerm)) + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteHostname(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.HostnameFilePath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_hosts_test.go b/pkg/installer/os/ubuntu/tests/write_hosts_test.go new file mode 100644 index 0000000..3f68ed7 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/write_hosts_test.go @@ -0,0 +1,85 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteHosts(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + fsMocks func(fs *afero.Afero) + want string + wantErr error + }{ + { + name: "write hosts", + allocation: &apiv2.MachineAllocation{ + Hostname: "my-host", + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: 
[]string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + }, + }, + want: `# this file was auto generated by the os-installer +127.0.0.1 localhost +10.0.16.2 my-host +`, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteHosts(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.EtcHostsPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_ntp_conf_test.go b/pkg/installer/os/ubuntu/tests/write_ntp_conf_test.go new file mode 100644 index 0000000..36ccba5 --- /dev/null +++ b/pkg/installer/os/ubuntu/tests/write_ntp_conf_test.go @@ -0,0 +1,105 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteNTPConf(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + fsMocks func(fs *afero.Afero) + want string + wantErr error + }{ + { + name: "configure custom ntp", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, 
fs.WriteFile(oscommon.TimesyncdConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + NtpServers: []*apiv2.NTPServer{ + {Address: "custom.1.ntp.org"}, + {Address: "custom.2.ntp.org"}, + }, + }, + want: `[Time] +NTP=custom.1.ntp.org custom.2.ntp.org +`, + wantErr: nil, + }, + { + name: "use default ntp", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(oscommon.TimesyncdConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + }, + want: "", + wantErr: nil, + }, + { + name: "skip firewalls", + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(oscommon.TimesyncdConfigPath, []byte(""), 0644)) + }, + allocation: &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + NtpServers: []*apiv2.NTPServer{ + {Address: "custom.1.ntp.org"}, + {Address: "custom.2.ntp.org"}, + }, + }, + want: "", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteNTPConf(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.TimesyncdConfigPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/tests/write_resolv_conf_test.go b/pkg/installer/os/ubuntu/tests/write_resolv_conf_test.go new file mode 100644 index 0000000..d83ce4f --- 
/dev/null +++ b/pkg/installer/os/ubuntu/tests/write_resolv_conf_test.go @@ -0,0 +1,94 @@ +package ubuntu_test + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/exec" + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" + "github.com/metal-stack/os-installer/pkg/installer/os/ubuntu" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_os_WriteResolvConf(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + fsMocks func(fs *afero.Afero) + want string + wantErr error + }{ + { + name: "resolv.conf gets written", + allocation: &apiv2.MachineAllocation{}, + fsMocks: func(fs *afero.Afero) { + require.NoError(t, fs.WriteFile(oscommon.ResolvConfPath, []byte(""), 0755)) + }, + want: `nameserver 8.8.8.8 +nameserver 8.8.4.4 +`, + wantErr: nil, + }, + { + name: "resolv.conf gets written, file is not present", + allocation: &apiv2.MachineAllocation{}, + want: `nameserver 8.8.8.8 +nameserver 8.8.4.4 +`, + wantErr: nil, + }, + { + name: "overwrite resolv.conf with custom DNS", + allocation: &apiv2.MachineAllocation{ + DnsServers: []*apiv2.DNSServer{ + {Ip: "1.2.3.4"}, + {Ip: "5.6.7.8"}, + }, + }, + want: `nameserver 1.2.3.4 +nameserver 5.6.7.8 +`, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + log = slog.Default() + fs = &afero.Afero{ + Fs: afero.NewMemMapFs(), + } + ) + + if tt.fsMocks != nil { + tt.fsMocks(fs) + } + + d := ubuntu.New(&oscommon.Config{ + Log: log, + Fs: fs, + Allocation: tt.allocation, + Exec: exec.New(log).WithCommandFn(test.FakeCmd(t)), + }) + + gotErr := d.WriteResolvConf(t.Context()) + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s",
diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(oscommon.ResolvConfPath) + require.NoError(t, err) + + assert.Equal(t, tt.want, string(content)) + }) + } +} diff --git a/pkg/installer/os/ubuntu/ubuntu.go b/pkg/installer/os/ubuntu/ubuntu.go new file mode 100644 index 0000000..6466847 --- /dev/null +++ b/pkg/installer/os/ubuntu/ubuntu.go @@ -0,0 +1,35 @@ +package ubuntu + +import ( + "context" + + oscommon "github.com/metal-stack/os-installer/pkg/installer/os/common" +) + +type ( + Os struct { + *oscommon.CommonTasks + } +) + +func New(cfg *oscommon.Config) *Os { + return &Os{ + CommonTasks: oscommon.New(cfg), + } +} + +func (o *Os) BootloaderID() string { + return "metal-ubuntu" +} + +func (o *Os) WriteBootInfo(ctx context.Context, cmdLine string) error { + return o.CommonTasks.WriteBootInfo(ctx, o.InitramdiskFormatString(), o.BootloaderID(), cmdLine) +} + +func (o *Os) CreateMetalUser(ctx context.Context) error { + return o.CommonTasks.CreateMetalUser(ctx, o.SudoGroup()) +} + +func (o *Os) GrubInstall(ctx context.Context, cmdLine string) error { + return o.CommonTasks.GrubInstall(ctx, o.BootloaderID(), cmdLine) +} diff --git a/pkg/interfaces/interfaces.go b/pkg/interfaces/interfaces.go new file mode 100644 index 0000000..b5b1e2f --- /dev/null +++ b/pkg/interfaces/interfaces.go @@ -0,0 +1,315 @@ +package interfaces + +import ( + "context" + "embed" + _ "embed" + "fmt" + "log/slog" + "path" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" +) + +const ( + systemdNetworkPath = "/etc/systemd/network/" + + loNetwork interfaceKind = "lo.network.tpl" + lanLink interfaceKind = "lan.link.tpl" + lanNetwork interfaceKind = "lan.network.tpl" + + bridgeNetwork interfaceKind = "bridge.network.tpl" + bridgeNetdev interfaceKind = "bridge.netdev.tpl" + + sviNetwork interfaceKind = 
"svi.network.tpl" + sviNetdev interfaceKind = "svi.netdev.tpl" + vrfNetwork interfaceKind = "vrf.network.tpl" + vrfNetdev interfaceKind = "vrf.netdev.tpl" + vxlanNetwork interfaceKind = "vxlan.network.tpl" + vxlanNetdev interfaceKind = "vxlan.netdev.tpl" + + comment = "generated by os-installer" +) + +var ( + //go:embed templates + interfaceTemplates embed.FS +) + +type ( + interfaceKind string + + Config struct { + Log *slog.Logger + Network *network.Network + Nics []*apiv2.MachineNic + fs afero.Fs + } + + loData struct { + Comment string + CIDRs []string + } + + lanLinkData struct { + Comment string + Mac string + Index int + MTU int + } + + lanNetworkData struct { + Comment string + VxlanIDs []uint64 + Index int + } + + bridgeNetworkData struct { + Comment string + EVPNIfaces []network.EvpnIface + } + + bridgeNetdevData struct { + Comment string + } + + evpnData struct { + Comment string + EVPNIface network.EvpnIface + UnderlayIP string + } +) + +func ConfigureInterfaces(ctx context.Context, cfg *Config) error { + cfg.Log.Debug("create loopback interfaces") + if err := configureLoopbackInterface(ctx, cfg); err != nil { + return fmt.Errorf("error configuring loopback interface: %w", err) + } + + cfg.Log.Debug("create lan interfaces") + if err := configureLanInterfaces(ctx, cfg); err != nil { + return fmt.Errorf("error configuring lan interfaces: %w", err) + } + + if cfg.Network.IsMachine() { + return nil + } + + cfg.Log.Debug("create bridges") + if err := configureBridges(ctx, cfg); err != nil { + return fmt.Errorf("error configuring network bridges: %w", err) + } + + cfg.Log.Debug("create evpn") + if err := configureEVPN(ctx, cfg); err != nil { + return fmt.Errorf("error configuring evpns: %w", err) + } + + return nil +} + +func configureLoopbackInterface(ctx context.Context, cfg *Config) error { + loopbackCIDRs, err := cfg.Network.LoopbackCIDRs() + if err != nil { + return err + } + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString:
loNetwork.mustReadTemplate(), + Data: loData{ + Comment: comment, + CIDRs: loopbackCIDRs, + }, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, "00-lo.network")) + if err != nil { + return err + } + + return nil +} + +func configureLanInterfaces(ctx context.Context, cfg *Config) error { + const offset = 10 + + for idx, nic := range cfg.Nics { + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: lanLink.mustReadTemplate(), + Data: lanLinkData{ + Comment: comment, + Mac: nic.Mac, + Index: idx, + MTU: cfg.Network.MTU(), + }, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, fmt.Sprintf("%d-lan%d.link", offset+idx, idx))) + if err != nil { + return fmt.Errorf("unable to render lan link config: %w", err) + } + + r, err = renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: lanNetwork.mustReadTemplate(), + Data: lanNetworkData{ + Comment: comment, + VxlanIDs: cfg.Network.VxlanIDs(), + Index: idx, + }, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, fmt.Sprintf("%d-lan%d.network", offset+idx, idx))) + if err != nil { + return fmt.Errorf("unable to render lan network config: %w", err) + } + } + + return nil +} + +func configureBridges(ctx context.Context, cfg *Config) error { + ifaces, err := cfg.Network.EVPNIfaces() + if err != nil { + return fmt.Errorf("unable to get evpn interfaces: %w", err) + } + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: bridgeNetwork.mustReadTemplate(), + Data: bridgeNetworkData{ + Comment: comment, + EVPNIfaces: ifaces, + }, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, "20-bridge.network")) + if err != nil { + return fmt.Errorf("unable to render bridge network config: %w", err) + } + + r, err = renderer.New(&renderer.Config{ + Log: 
cfg.Log, + TemplateString: bridgeNetdev.mustReadTemplate(), + Data: bridgeNetdevData{ + Comment: comment, + }, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, "20-bridge.netdev")) + if err != nil { + return fmt.Errorf("unable to render bridge netdev config: %w", err) + } + + return nil +} + +func configureEVPN(ctx context.Context, cfg *Config) error { + const offset = 30 + + ifaces, err := cfg.Network.EVPNIfaces() + if err != nil { + return fmt.Errorf("unable to get evpn interfaces: %w", err) + } + + underlayIPs, err := cfg.Network.PrivatePrimaryIPs() + if err != nil { + return err + } + + for idx, iface := range ifaces { + for _, component := range []struct { + ikindnetwork interfaceKind + ikindnetdev interfaceKind + name string + }{ + { + ikindnetwork: sviNetwork, + ikindnetdev: sviNetdev, + name: "svi", + }, + { + ikindnetwork: vrfNetwork, + ikindnetdev: vrfNetdev, + name: "vrf", + }, + { + ikindnetwork: vxlanNetwork, + ikindnetdev: vxlanNetdev, + name: "vxlan", + }, + } { + data := evpnData{ + Comment: comment, + EVPNIface: iface, + UnderlayIP: underlayIPs[0], + } + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: component.ikindnetwork.mustReadTemplate(), + Data: data, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, fmt.Sprintf("%d-%s-%d.network", offset+idx, component.name, iface.VrfID))) + if err != nil { + return fmt.Errorf("unable to render %s network config: %w", component.name, err) + } + + r, err = renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: component.ikindnetdev.mustReadTemplate(), + Data: data, + Fs: cfg.fs, + }) + if err != nil { + return err + } + + _, err = r.Render(ctx, path.Join(systemdNetworkPath, fmt.Sprintf("%d-%s-%d.netdev", offset+idx, component.name, iface.VrfID))) + if err != nil { + return fmt.Errorf("unable to render %s netdev config: %w", component.name, err) + } + 
} + } + + return nil +} + +func (i interfaceKind) mustReadTemplate() string { + tpl, err := interfaceTemplates.ReadFile(path.Join("templates", string(i))) + if err != nil { + panic(err) + } + + return string(tpl) +} diff --git a/pkg/interfaces/interfaces_test.go b/pkg/interfaces/interfaces_test.go new file mode 100644 index 0000000..49bbfd0 --- /dev/null +++ b/pkg/interfaces/interfaces_test.go @@ -0,0 +1,341 @@ +package interfaces + +import ( + "embed" + "log/slog" + "path" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test + expectedInterfaceFiles embed.FS + + machineAllocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Ips: []string{"10.0.17.2"}, + }, + { + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + }, + { + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"100.127.129.1"}, + }, + { + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + { + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + }, + } + + firewallAllocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + }, + { + Network: "internet", + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Vrf: 104009, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"100.127.129.1"}, + Vrf: 104010, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + } +) + +func Test_configureLoopbackInterface(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + wantFilePath string + wantErr error + }{ + { + name: "render machine", + allocation: machineAllocation, + wantFilePath: "machine/00-lo.network", + wantErr: nil, + }, + { + name: "render firewall", + allocation: firewallAllocation, + wantFilePath: "firewall/00-lo.network", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotErr := configureLoopbackInterface(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(path.Join(systemdNetworkPath, "00-lo.network")) + require.NoError(t, err) + + if diff := cmp.Diff(mustReadExpected(tt.wantFilePath), string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} + +func Test_configureLanInterface(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + nics []*apiv2.MachineNic + wantFilePaths []string + wantErr error + }{ + { + name: "render machine", + allocation: machineAllocation, + nics: []*apiv2.MachineNic{ + { + Mac: "00:03:00:11:11:01", + }, + { + Mac: "00:03:00:11:12:01", + }, + }, + wantFilePaths: []string{ + 
"machine/10-lan0.link", + "machine/10-lan0.network", + "machine/11-lan1.link", + "machine/11-lan1.network", + }, + wantErr: nil, + }, + { + name: "render firewall", + allocation: firewallAllocation, + nics: []*apiv2.MachineNic{ + { + Mac: "00:03:00:11:11:01", + }, + { + Mac: "00:03:00:11:12:01", + }, + }, + wantFilePaths: []string{ + "firewall/10-lan0.link", + "firewall/10-lan0.network", + "firewall/11-lan1.link", + "firewall/11-lan1.network", + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotErr := configureLanInterfaces(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + Nics: tt.nics, + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + for _, name := range tt.wantFilePaths { + content, err := fs.ReadFile(path.Join(systemdNetworkPath, path.Base(name))) + require.NoError(t, err) + + if diff := cmp.Diff(mustReadExpected(name), string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + } + }) + } +} + +func Test_configureBridges(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + wantFilePaths []string + wantErr error + }{ + { + name: "render firewall", + allocation: firewallAllocation, + wantFilePaths: []string{ + "firewall/20-bridge.network", + "firewall/20-bridge.netdev", + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotErr := configureBridges(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + for _, name 
:= range tt.wantFilePaths { + content, err := fs.ReadFile(path.Join(systemdNetworkPath, path.Base(name))) + require.NoError(t, err) + + if diff := cmp.Diff(mustReadExpected(name), string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + } + }) + } +} + +func Test_configureEVPN(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + wantFilePaths []string + wantErr error + }{ + { + name: "render firewall", + allocation: firewallAllocation, + wantFilePaths: []string{ + "firewall/30-svi-3981.network", + "firewall/30-svi-3981.netdev", + "firewall/31-svi-3982.network", + "firewall/31-svi-3982.netdev", + "firewall/32-svi-104009.network", + "firewall/32-svi-104009.netdev", + "firewall/33-svi-104010.network", + "firewall/33-svi-104010.netdev", + + "firewall/30-vrf-3981.network", + "firewall/30-vrf-3981.netdev", + "firewall/31-vrf-3982.network", + "firewall/31-vrf-3982.netdev", + "firewall/32-vrf-104009.network", + "firewall/32-vrf-104009.netdev", + "firewall/33-vrf-104010.network", + "firewall/33-vrf-104010.netdev", + + "firewall/30-vxlan-3981.network", + "firewall/30-vxlan-3981.netdev", + "firewall/31-vxlan-3982.network", + "firewall/31-vxlan-3982.netdev", + "firewall/32-vxlan-104009.network", + "firewall/32-vxlan-104009.netdev", + "firewall/33-vxlan-104010.network", + "firewall/33-vxlan-104010.netdev", + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotErr := configureEVPN(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + for _, name := range tt.wantFilePaths { + content, err := fs.ReadFile(path.Join(systemdNetworkPath, path.Base(name))) + require.NoError(t, err) + + if diff := 
cmp.Diff(mustReadExpected(name), string(content)); diff != "" { + t.Errorf("diff = %s", diff) + } + } + }) + } +} + +func mustReadExpected(name string) string { + tpl, err := expectedInterfaceFiles.ReadFile(path.Join("test", name)) + if err != nil { + panic(err) + } + + return string(tpl) +} diff --git a/pkg/interfaces/templates/bridge.netdev.tpl b/pkg/interfaces/templates/bridge.netdev.tpl new file mode 100644 index 0000000..bb7bf8a --- /dev/null +++ b/pkg/interfaces/templates/bridge.netdev.tpl @@ -0,0 +1,9 @@ +# {{ .Comment }} +[NetDev] +Name=bridge +Kind=bridge +MTUBytes=9000 + +[Bridge] +DefaultPVID=none +VLANFiltering=yes diff --git a/pkg/interfaces/templates/bridge.network.tpl b/pkg/interfaces/templates/bridge.network.tpl new file mode 100644 index 0000000..66f2eca --- /dev/null +++ b/pkg/interfaces/templates/bridge.network.tpl @@ -0,0 +1,13 @@ +# {{ .Comment }} +[Match] +Name=bridge + +[Network] +{{- range .EVPNIfaces }} +VLAN=vlan{{ .VrfID }} +{{- end }} +{{- range .EVPNIfaces }} + +[BridgeVLAN] +VLAN={{ .VlanID }} +{{- end }} diff --git a/pkg/interfaces/templates/lan.link.tpl b/pkg/interfaces/templates/lan.link.tpl new file mode 100644 index 0000000..d41161e --- /dev/null +++ b/pkg/interfaces/templates/lan.link.tpl @@ -0,0 +1,8 @@ +# {{ .Comment }} +[Match] +PermanentMACAddress={{ .Mac }} + +[Link] +Name=lan{{ .Index }} +NamePolicy= +MTUBytes={{ .MTU }} diff --git a/pkg/interfaces/templates/lan.network.tpl b/pkg/interfaces/templates/lan.network.tpl new file mode 100644 index 0000000..7430196 --- /dev/null +++ b/pkg/interfaces/templates/lan.network.tpl @@ -0,0 +1,9 @@ +# {{ .Comment }} +[Match] +Name=lan{{ .Index }} + +[Network] +IPv6AcceptRA=no +{{- range .VxlanIDs }} +VXLAN=vni{{ . 
}} +{{- end }} diff --git a/pkg/interfaces/templates/lo.network.tpl b/pkg/interfaces/templates/lo.network.tpl new file mode 100644 index 0000000..563cb75 --- /dev/null +++ b/pkg/interfaces/templates/lo.network.tpl @@ -0,0 +1,11 @@ +# {{ .Comment }} +[Match] +Name=lo + +[Address] +Address=127.0.0.1/8 +{{- range .CIDRs }} + +[Address] +Address={{ . }} +{{- end }} diff --git a/pkg/interfaces/templates/svi.netdev.tpl b/pkg/interfaces/templates/svi.netdev.tpl new file mode 100644 index 0000000..123c410 --- /dev/null +++ b/pkg/interfaces/templates/svi.netdev.tpl @@ -0,0 +1,8 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[NetDev] +Name=vlan{{ .EVPNIface.VrfID }} +Kind=vlan + +[VLAN] +Id={{ .EVPNIface.VlanID }} diff --git a/pkg/interfaces/templates/svi.network.tpl b/pkg/interfaces/templates/svi.network.tpl new file mode 100644 index 0000000..b059829 --- /dev/null +++ b/pkg/interfaces/templates/svi.network.tpl @@ -0,0 +1,13 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[Match] +Name=vlan{{ .EVPNIface.VrfID }} + +[Link] +MTUBytes=9000 + +[Network] +VRF=vrf{{ .EVPNIface.VrfID }} +{{- range .EVPNIface.CIDRs }} +Address={{ . 
}} +{{- end }} diff --git a/pkg/interfaces/templates/vrf.netdev.tpl b/pkg/interfaces/templates/vrf.netdev.tpl new file mode 100644 index 0000000..9e5e840 --- /dev/null +++ b/pkg/interfaces/templates/vrf.netdev.tpl @@ -0,0 +1,8 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[NetDev] +Name=vrf{{ .EVPNIface.VrfID }} +Kind=vrf + +[VRF] +Table={{ .EVPNIface.VlanID }} diff --git a/pkg/interfaces/templates/vrf.network.tpl b/pkg/interfaces/templates/vrf.network.tpl new file mode 100644 index 0000000..a24e0bc --- /dev/null +++ b/pkg/interfaces/templates/vrf.network.tpl @@ -0,0 +1,4 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[Match] +Name=vrf{{ .EVPNIface.VrfID }} diff --git a/pkg/interfaces/templates/vxlan.netdev.tpl b/pkg/interfaces/templates/vxlan.netdev.tpl new file mode 100644 index 0000000..23e7d38 --- /dev/null +++ b/pkg/interfaces/templates/vxlan.netdev.tpl @@ -0,0 +1,12 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[NetDev] +Name=vni{{ .EVPNIface.VrfID }} +Kind=vxlan + +[VXLAN] +VNI={{ .EVPNIface.VrfID }} +Local={{ .UnderlayIP }} +UDPChecksum=true +MacLearning=false +DestinationPort=4789 diff --git a/pkg/interfaces/templates/vxlan.network.tpl b/pkg/interfaces/templates/vxlan.network.tpl new file mode 100644 index 0000000..b18f4fc --- /dev/null +++ b/pkg/interfaces/templates/vxlan.network.tpl @@ -0,0 +1,14 @@ +# {{ .Comment }} +# network: {{ .EVPNIface.Network }} +[Match] +Name=vni{{ .EVPNIface.VrfID }} + +[Link] +MTUBytes=9000 + +[Network] +Bridge=bridge + +[BridgeVLAN] +PVID={{ .EVPNIface.VlanID }} +EgressUntagged={{ .EVPNIface.VlanID }} diff --git a/pkg/network/testdata/networkd/firewall/00-lo.network b/pkg/interfaces/test/firewall/00-lo.network similarity index 52% rename from pkg/network/testdata/networkd/firewall/00-lo.network rename to pkg/interfaces/test/firewall/00-lo.network index 4cb3725..280382a 100644 --- a/pkg/network/testdata/networkd/firewall/00-lo.network +++ b/pkg/interfaces/test/firewall/00-lo.network @@ 
-1,4 +1,4 @@ -# networkid: underlay-vagrant-lab +# generated by os-installer [Match] Name=lo @@ -6,4 +6,4 @@ Name=lo Address=127.0.0.1/8 [Address] -Address=10.1.0.1/32 \ No newline at end of file +Address=10.1.0.1/32 diff --git a/pkg/interfaces/test/firewall/10-lan0.link b/pkg/interfaces/test/firewall/10-lan0.link new file mode 100644 index 0000000..186fcfa --- /dev/null +++ b/pkg/interfaces/test/firewall/10-lan0.link @@ -0,0 +1,8 @@ +# generated by os-installer +[Match] +PermanentMACAddress=00:03:00:11:11:01 + +[Link] +Name=lan0 +NamePolicy= +MTUBytes=9216 diff --git a/pkg/interfaces/test/firewall/10-lan0.network b/pkg/interfaces/test/firewall/10-lan0.network new file mode 100644 index 0000000..c7f1767 --- /dev/null +++ b/pkg/interfaces/test/firewall/10-lan0.network @@ -0,0 +1,10 @@ +# generated by os-installer +[Match] +Name=lan0 + +[Network] +IPv6AcceptRA=no +VXLAN=vni3981 +VXLAN=vni3982 +VXLAN=vni104009 +VXLAN=vni104010 diff --git a/pkg/interfaces/test/firewall/11-lan1.link b/pkg/interfaces/test/firewall/11-lan1.link new file mode 100644 index 0000000..9179910 --- /dev/null +++ b/pkg/interfaces/test/firewall/11-lan1.link @@ -0,0 +1,8 @@ +# generated by os-installer +[Match] +PermanentMACAddress=00:03:00:11:12:01 + +[Link] +Name=lan1 +NamePolicy= +MTUBytes=9216 diff --git a/pkg/interfaces/test/firewall/11-lan1.network b/pkg/interfaces/test/firewall/11-lan1.network new file mode 100644 index 0000000..a6df3e6 --- /dev/null +++ b/pkg/interfaces/test/firewall/11-lan1.network @@ -0,0 +1,10 @@ +# generated by os-installer +[Match] +Name=lan1 + +[Network] +IPv6AcceptRA=no +VXLAN=vni3981 +VXLAN=vni3982 +VXLAN=vni104009 +VXLAN=vni104010 diff --git a/pkg/interfaces/test/firewall/20-bridge.netdev b/pkg/interfaces/test/firewall/20-bridge.netdev new file mode 100644 index 0000000..2723c13 --- /dev/null +++ b/pkg/interfaces/test/firewall/20-bridge.netdev @@ -0,0 +1,9 @@ +# generated by os-installer +[NetDev] +Name=bridge +Kind=bridge +MTUBytes=9000 + +[Bridge] 
+DefaultPVID=none +VLANFiltering=yes diff --git a/pkg/network/testdata/networkd/firewall/20-bridge.network b/pkg/interfaces/test/firewall/20-bridge.network similarity index 58% rename from pkg/network/testdata/networkd/firewall/20-bridge.network rename to pkg/interfaces/test/firewall/20-bridge.network index 9ed8e6a..5a6e1e9 100644 --- a/pkg/network/testdata/networkd/firewall/20-bridge.network +++ b/pkg/interfaces/test/firewall/20-bridge.network @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer [Match] Name=bridge @@ -19,4 +18,4 @@ VLAN=1001 VLAN=1002 [BridgeVLAN] -VLAN=1004 \ No newline at end of file +VLAN=1004 diff --git a/pkg/interfaces/test/firewall/30-svi-3981.netdev b/pkg/interfaces/test/firewall/30-svi-3981.netdev new file mode 100644 index 0000000..e281323 --- /dev/null +++ b/pkg/interfaces/test/firewall/30-svi-3981.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: 379d294d-22e8-4aed-82e1-62c6c2f08d6a +[NetDev] +Name=vlan3981 +Kind=vlan + +[VLAN] +Id=1000 diff --git a/pkg/network/testdata/networkd/firewall/30-svi-3981.network b/pkg/interfaces/test/firewall/30-svi-3981.network similarity index 53% rename from pkg/network/testdata/networkd/firewall/30-svi-3981.network rename to pkg/interfaces/test/firewall/30-svi-3981.network index ff73189..e57cd63 100644 --- a/pkg/network/testdata/networkd/firewall/30-svi-3981.network +++ b/pkg/interfaces/test/firewall/30-svi-3981.network @@ -1,4 +1,5 @@ -# svi (networkid: bc830818-2df1-4904-8c40-4322296d393d) +# generated by os-installer +# network: 379d294d-22e8-4aed-82e1-62c6c2f08d6a [Match] Name=vlan3981 diff --git a/pkg/interfaces/test/firewall/30-vrf-3981.netdev b/pkg/interfaces/test/firewall/30-vrf-3981.netdev new file mode 100644 index 0000000..e58d053 --- /dev/null +++ b/pkg/interfaces/test/firewall/30-vrf-3981.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: 
379d294d-22e8-4aed-82e1-62c6c2f08d6a +[NetDev] +Name=vrf3981 +Kind=vrf + +[VRF] +Table=1000 diff --git a/pkg/interfaces/test/firewall/30-vrf-3981.network b/pkg/interfaces/test/firewall/30-vrf-3981.network new file mode 100644 index 0000000..46bf5b3 --- /dev/null +++ b/pkg/interfaces/test/firewall/30-vrf-3981.network @@ -0,0 +1,4 @@ +# generated by os-installer +# network: 379d294d-22e8-4aed-82e1-62c6c2f08d6a +[Match] +Name=vrf3981 diff --git a/pkg/network/testdata/networkd/firewall/30-vxlan-3981.netdev b/pkg/interfaces/test/firewall/30-vxlan-3981.netdev similarity index 61% rename from pkg/network/testdata/networkd/firewall/30-vxlan-3981.netdev rename to pkg/interfaces/test/firewall/30-vxlan-3981.netdev index 4faac86..a413ac8 100644 --- a/pkg/network/testdata/networkd/firewall/30-vxlan-3981.netdev +++ b/pkg/interfaces/test/firewall/30-vxlan-3981.netdev @@ -1,4 +1,5 @@ -# vxlan (networkid: bc830818-2df1-4904-8c40-4322296d393d) +# generated by os-installer +# network: 379d294d-22e8-4aed-82e1-62c6c2f08d6a [NetDev] Name=vni3981 Kind=vxlan diff --git a/pkg/network/testdata/networkd/firewall/30-vxlan-3981.network b/pkg/interfaces/test/firewall/30-vxlan-3981.network similarity index 59% rename from pkg/network/testdata/networkd/firewall/30-vxlan-3981.network rename to pkg/interfaces/test/firewall/30-vxlan-3981.network index 0a51049..2645337 100644 --- a/pkg/network/testdata/networkd/firewall/30-vxlan-3981.network +++ b/pkg/interfaces/test/firewall/30-vxlan-3981.network @@ -1,4 +1,5 @@ -# vxlan (networkid: bc830818-2df1-4904-8c40-4322296d393d) +# generated by os-installer +# network: 379d294d-22e8-4aed-82e1-62c6c2f08d6a [Match] Name=vni3981 diff --git a/pkg/interfaces/test/firewall/31-svi-3982.netdev b/pkg/interfaces/test/firewall/31-svi-3982.netdev new file mode 100644 index 0000000..17d489f --- /dev/null +++ b/pkg/interfaces/test/firewall/31-svi-3982.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: partition-storage +[NetDev] +Name=vlan3982 +Kind=vlan + 
+[VLAN] +Id=1001 diff --git a/pkg/network/testdata/networkd/firewall/31-svi-3982.network b/pkg/interfaces/test/firewall/31-svi-3982.network similarity index 60% rename from pkg/network/testdata/networkd/firewall/31-svi-3982.network rename to pkg/interfaces/test/firewall/31-svi-3982.network index 855cf4d..2608d7e 100644 --- a/pkg/network/testdata/networkd/firewall/31-svi-3982.network +++ b/pkg/interfaces/test/firewall/31-svi-3982.network @@ -1,4 +1,5 @@ -# svi (networkid: storage-net) +# generated by os-installer +# network: partition-storage [Match] Name=vlan3982 diff --git a/pkg/interfaces/test/firewall/31-vrf-3982.netdev b/pkg/interfaces/test/firewall/31-vrf-3982.netdev new file mode 100644 index 0000000..cc70188 --- /dev/null +++ b/pkg/interfaces/test/firewall/31-vrf-3982.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: partition-storage +[NetDev] +Name=vrf3982 +Kind=vrf + +[VRF] +Table=1001 diff --git a/pkg/interfaces/test/firewall/31-vrf-3982.network b/pkg/interfaces/test/firewall/31-vrf-3982.network new file mode 100644 index 0000000..9293265 --- /dev/null +++ b/pkg/interfaces/test/firewall/31-vrf-3982.network @@ -0,0 +1,4 @@ +# generated by os-installer +# network: partition-storage +[Match] +Name=vrf3982 diff --git a/pkg/network/testdata/networkd/firewall/31-vxlan-3982.netdev b/pkg/interfaces/test/firewall/31-vxlan-3982.netdev similarity index 68% rename from pkg/network/testdata/networkd/firewall/31-vxlan-3982.netdev rename to pkg/interfaces/test/firewall/31-vxlan-3982.netdev index e909e12..9559950 100644 --- a/pkg/network/testdata/networkd/firewall/31-vxlan-3982.netdev +++ b/pkg/interfaces/test/firewall/31-vxlan-3982.netdev @@ -1,4 +1,5 @@ -# vxlan (networkid: storage-net) +# generated by os-installer +# network: partition-storage [NetDev] Name=vni3982 Kind=vxlan diff --git a/pkg/network/testdata/networkd/firewall/31-vxlan-3982.network b/pkg/interfaces/test/firewall/31-vxlan-3982.network similarity index 66% rename from 
pkg/network/testdata/networkd/firewall/31-vxlan-3982.network rename to pkg/interfaces/test/firewall/31-vxlan-3982.network index 204c6b3..70b855c 100644 --- a/pkg/network/testdata/networkd/firewall/31-vxlan-3982.network +++ b/pkg/interfaces/test/firewall/31-vxlan-3982.network @@ -1,4 +1,5 @@ -# vxlan (networkid: storage-net) +# generated by os-installer +# network: partition-storage [Match] Name=vni3982 diff --git a/pkg/network/testdata/networkd/firewall/32-svi-104009.netdev b/pkg/interfaces/test/firewall/32-svi-104009.netdev similarity index 51% rename from pkg/network/testdata/networkd/firewall/32-svi-104009.netdev rename to pkg/interfaces/test/firewall/32-svi-104009.netdev index f941ea3..a4b9d7f 100644 --- a/pkg/network/testdata/networkd/firewall/32-svi-104009.netdev +++ b/pkg/interfaces/test/firewall/32-svi-104009.netdev @@ -1,4 +1,5 @@ -# svi (networkid: internet-vagrant-lab) +# generated by os-installer +# network: internet [NetDev] Name=vlan104009 Kind=vlan diff --git a/pkg/network/testdata/networkd/firewall/32-svi-104009.network b/pkg/interfaces/test/firewall/32-svi-104009.network similarity index 65% rename from pkg/network/testdata/networkd/firewall/32-svi-104009.network rename to pkg/interfaces/test/firewall/32-svi-104009.network index e8e16d8..9daaae6 100644 --- a/pkg/network/testdata/networkd/firewall/32-svi-104009.network +++ b/pkg/interfaces/test/firewall/32-svi-104009.network @@ -1,4 +1,5 @@ -# svi (networkid: internet-vagrant-lab) +# generated by os-installer +# network: internet [Match] Name=vlan104009 diff --git a/pkg/interfaces/test/firewall/32-vrf-104009.netdev b/pkg/interfaces/test/firewall/32-vrf-104009.netdev new file mode 100644 index 0000000..1f9316b --- /dev/null +++ b/pkg/interfaces/test/firewall/32-vrf-104009.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: internet +[NetDev] +Name=vrf104009 +Kind=vrf + +[VRF] +Table=1002 diff --git a/pkg/interfaces/test/firewall/32-vrf-104009.network 
b/pkg/interfaces/test/firewall/32-vrf-104009.network new file mode 100644 index 0000000..da70561 --- /dev/null +++ b/pkg/interfaces/test/firewall/32-vrf-104009.network @@ -0,0 +1,4 @@ +# generated by os-installer +# network: internet +[Match] +Name=vrf104009 diff --git a/pkg/network/testdata/networkd/firewall/32-vxlan-104009.netdev b/pkg/interfaces/test/firewall/32-vxlan-104009.netdev similarity index 72% rename from pkg/network/testdata/networkd/firewall/32-vxlan-104009.netdev rename to pkg/interfaces/test/firewall/32-vxlan-104009.netdev index 43ed598..1f0be16 100644 --- a/pkg/network/testdata/networkd/firewall/32-vxlan-104009.netdev +++ b/pkg/interfaces/test/firewall/32-vxlan-104009.netdev @@ -1,4 +1,5 @@ -# vxlan (networkid: internet-vagrant-lab) +# generated by os-installer +# network: internet [NetDev] Name=vni104009 Kind=vxlan diff --git a/pkg/network/testdata/networkd/firewall/32-vxlan-104009.network b/pkg/interfaces/test/firewall/32-vxlan-104009.network similarity index 70% rename from pkg/network/testdata/networkd/firewall/32-vxlan-104009.network rename to pkg/interfaces/test/firewall/32-vxlan-104009.network index ea24f09..1db18e0 100644 --- a/pkg/network/testdata/networkd/firewall/32-vxlan-104009.network +++ b/pkg/interfaces/test/firewall/32-vxlan-104009.network @@ -1,4 +1,5 @@ -# vxlan (networkid: internet-vagrant-lab) +# generated by os-installer +# network: internet [Match] Name=vni104009 diff --git a/pkg/network/testdata/networkd/firewall/33-svi-104010.netdev b/pkg/interfaces/test/firewall/33-svi-104010.netdev similarity index 53% rename from pkg/network/testdata/networkd/firewall/33-svi-104010.netdev rename to pkg/interfaces/test/firewall/33-svi-104010.netdev index d1e68a3..7e80ab2 100644 --- a/pkg/network/testdata/networkd/firewall/33-svi-104010.netdev +++ b/pkg/interfaces/test/firewall/33-svi-104010.netdev @@ -1,4 +1,5 @@ -# svi (networkid: mpls-nbg-w8101-test) +# generated by os-installer +# network: mpls [NetDev] Name=vlan104010 Kind=vlan diff 
--git a/pkg/network/testdata/networkd/firewall/33-svi-104010.network b/pkg/interfaces/test/firewall/33-svi-104010.network similarity index 68% rename from pkg/network/testdata/networkd/firewall/33-svi-104010.network rename to pkg/interfaces/test/firewall/33-svi-104010.network index 11165a4..4665e50 100644 --- a/pkg/network/testdata/networkd/firewall/33-svi-104010.network +++ b/pkg/interfaces/test/firewall/33-svi-104010.network @@ -1,4 +1,5 @@ -# svi (networkid: mpls-nbg-w8101-test) +# generated by os-installer +# network: mpls [Match] Name=vlan104010 diff --git a/pkg/interfaces/test/firewall/33-vrf-104010.netdev b/pkg/interfaces/test/firewall/33-vrf-104010.netdev new file mode 100644 index 0000000..d3f564d --- /dev/null +++ b/pkg/interfaces/test/firewall/33-vrf-104010.netdev @@ -0,0 +1,8 @@ +# generated by os-installer +# network: mpls +[NetDev] +Name=vrf104010 +Kind=vrf + +[VRF] +Table=1004 diff --git a/pkg/interfaces/test/firewall/33-vrf-104010.network b/pkg/interfaces/test/firewall/33-vrf-104010.network new file mode 100644 index 0000000..51e2fc5 --- /dev/null +++ b/pkg/interfaces/test/firewall/33-vrf-104010.network @@ -0,0 +1,4 @@ +# generated by os-installer +# network: mpls +[Match] +Name=vrf104010 diff --git a/pkg/network/testdata/networkd/firewall/33-vxlan-104010.netdev b/pkg/interfaces/test/firewall/33-vxlan-104010.netdev similarity index 74% rename from pkg/network/testdata/networkd/firewall/33-vxlan-104010.netdev rename to pkg/interfaces/test/firewall/33-vxlan-104010.netdev index 55ac87b..6e3336f 100644 --- a/pkg/network/testdata/networkd/firewall/33-vxlan-104010.netdev +++ b/pkg/interfaces/test/firewall/33-vxlan-104010.netdev @@ -1,4 +1,5 @@ -# vxlan (networkid: mpls-nbg-w8101-test) +# generated by os-installer +# network: mpls [NetDev] Name=vni104010 Kind=vxlan diff --git a/pkg/network/testdata/networkd/firewall/33-vxlan-104010.network b/pkg/interfaces/test/firewall/33-vxlan-104010.network similarity index 72% rename from 
pkg/network/testdata/networkd/firewall/33-vxlan-104010.network rename to pkg/interfaces/test/firewall/33-vxlan-104010.network index fff9745..bdb5b09 100644 --- a/pkg/network/testdata/networkd/firewall/33-vxlan-104010.network +++ b/pkg/interfaces/test/firewall/33-vxlan-104010.network @@ -1,4 +1,5 @@ -# vxlan (networkid: mpls-nbg-w8101-test) +# generated by os-installer +# network: mpls [Match] Name=vni104010 diff --git a/pkg/network/testdata/networkd/machine/00-lo.network b/pkg/interfaces/test/machine/00-lo.network similarity index 59% rename from pkg/network/testdata/networkd/machine/00-lo.network rename to pkg/interfaces/test/machine/00-lo.network index ec7ec41..08d73ee 100644 --- a/pkg/network/testdata/networkd/machine/00-lo.network +++ b/pkg/interfaces/test/machine/00-lo.network @@ -1,4 +1,4 @@ -# networkid: bc830818-2df1-4904-8c40-4322296d393d +# generated by os-installer [Match] Name=lo @@ -12,4 +12,7 @@ Address=10.0.17.2/32 Address=185.1.2.3/32 [Address] -Address=100.127.129.1/32 \ No newline at end of file +Address=100.127.129.1/32 + +[Address] +Address=2001::4/128 diff --git a/pkg/interfaces/test/machine/10-lan0.link b/pkg/interfaces/test/machine/10-lan0.link new file mode 100644 index 0000000..c98eb3d --- /dev/null +++ b/pkg/interfaces/test/machine/10-lan0.link @@ -0,0 +1,8 @@ +# generated by os-installer +[Match] +PermanentMACAddress=00:03:00:11:11:01 + +[Link] +Name=lan0 +NamePolicy= +MTUBytes=9000 diff --git a/pkg/interfaces/test/machine/10-lan0.network b/pkg/interfaces/test/machine/10-lan0.network new file mode 100644 index 0000000..aba2532 --- /dev/null +++ b/pkg/interfaces/test/machine/10-lan0.network @@ -0,0 +1,6 @@ +# generated by os-installer +[Match] +Name=lan0 + +[Network] +IPv6AcceptRA=no diff --git a/pkg/interfaces/test/machine/11-lan1.link b/pkg/interfaces/test/machine/11-lan1.link new file mode 100644 index 0000000..2be7376 --- /dev/null +++ b/pkg/interfaces/test/machine/11-lan1.link @@ -0,0 +1,8 @@ +# generated by os-installer +[Match] 
+PermanentMACAddress=00:03:00:11:12:01 + +[Link] +Name=lan1 +NamePolicy= +MTUBytes=9000 diff --git a/pkg/interfaces/test/machine/11-lan1.network b/pkg/interfaces/test/machine/11-lan1.network new file mode 100644 index 0000000..284c8a3 --- /dev/null +++ b/pkg/interfaces/test/machine/11-lan1.network @@ -0,0 +1,6 @@ +# generated by os-installer +[Match] +Name=lan1 + +[Network] +IPv6AcceptRA=no diff --git a/pkg/net/README.md b/pkg/net/README.md deleted file mode 100644 index 6dd7df1..0000000 --- a/pkg/net/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# network - -Network can apply changes to `/etc/network/interfaces` and `/etc/frr/frr.conf`. - -It was intentionally created to provide a common means to: - -- apply validation -- render interfaces/frr.conf files -- reload required services to apply changes - -## Requirements - -Network lib relies on `ifupdown2` and `systemd`. It also is assumed frr is installed as systemd service. - -## Usage - -Make use network lib: - -```go -package main - -import "github.com/metal-stack/os-installer/pkg/net" - -func main() { - // TODO -} - -``` \ No newline at end of file diff --git a/pkg/net/applier.go b/pkg/net/applier.go deleted file mode 100644 index 89ecf38..0000000 --- a/pkg/net/applier.go +++ /dev/null @@ -1,128 +0,0 @@ -package net - -import ( - "bufio" - "bytes" - "crypto/sha256" - "io" - "os" - "text/template" -) - -// Applier is an interface to render changes and reload services to apply them. -type Applier interface { - Apply(tpl template.Template, tmpFile, destFile string, reload bool) (bool, error) - Render(writer io.Writer, tpl template.Template) error - Reload() error - Validate() error - Compare(tmpFile, destFile string) bool -} - -// networkApplier holds the toolset for applying network configuration changes. -type networkApplier struct { - data any - validator Validator - reloader Reloader -} - -// NewNetworkApplier creates a new NewNetworkApplier. 
-func NewNetworkApplier(data any, validator Validator, reloader Reloader) Applier { - return &networkApplier{data: data, validator: validator, reloader: reloader} -} - -// Apply applies the current configuration with the given template. -func (n *networkApplier) Apply(tpl template.Template, tmpFile, destFile string, reload bool) (bool, error) { - f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return false, err - } - - defer func() { - _ = f.Close() - }() - - w := bufio.NewWriter(f) - err = n.Render(w, tpl) - if err != nil { - return false, err - } - - err = w.Flush() - if err != nil { - return false, err - } - - err = n.Validate() - if err != nil { - return false, err - } - - equal := n.Compare(tmpFile, destFile) - if equal { - return false, nil - } - - err = os.Rename(tmpFile, destFile) - if err != nil { - return false, err - } - - if !reload { - return true, nil - } - - err = n.Reload() - if err != nil { - return true, err - } - - return true, nil -} - -// Render renders the network interfaces to the given writer using the given template. -func (n *networkApplier) Render(w io.Writer, tpl template.Template) error { - return tpl.Execute(w, n.data) -} - -// Validate applies the given validator to validate current changes. -func (n *networkApplier) Validate() error { - return n.validator.Validate() -} - -// Reload reloads the necessary services when the network interfaces configuration was changed. -func (n *networkApplier) Reload() error { - return n.reloader.Reload() -} - -// Compare compare source and target for hash equality. 
-func (n *networkApplier) Compare(source, target string) bool { - sourceChecksum, err := checksum(source) - if err != nil { - return false - } - - targetChecksum, err := checksum(target) - if err != nil { - return false - } - - return bytes.Equal(sourceChecksum, targetChecksum) -} - -func checksum(file string) ([]byte, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - - defer func() { - _ = f.Close() - }() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/pkg/net/applier_test.go b/pkg/net/applier_test.go deleted file mode 100644 index 2164419..0000000 --- a/pkg/net/applier_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package net - -import "testing" - -func TestNetworkApplier_Compare(t *testing.T) { - tests := []struct { - name string - source string - target string - want bool - }{ - { - name: "simple test", - source: "/etc/hostname", - target: "/etc/passwd", - want: false, - }, - { - name: "simple test", - source: "/etc/hostname", - target: "/etc/hostname", - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - n := &networkApplier{} - if got := n.Compare(tt.source, tt.target); got != tt.want { - t.Errorf("NetworkApplier.Compare() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/net/doc.go b/pkg/net/doc.go deleted file mode 100644 index 94b9ea7..0000000 --- a/pkg/net/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package net contains code to apply changes to network interfaces and FRR (Free Range Routing). -package net diff --git a/pkg/net/reloader.go b/pkg/net/reloader.go deleted file mode 100644 index e876cd6..0000000 --- a/pkg/net/reloader.go +++ /dev/null @@ -1,51 +0,0 @@ -package net - -import ( - "context" - "fmt" - - "github.com/coreos/go-systemd/v22/dbus" -) - -const done = "done" - -// Reloader triggers the reload to carry out the changes of an applier. 
-type Reloader interface { - Reload() error -} - -// NewDBusReloader is a reloader for systemd units with dbus. -func NewDBusReloader(service string) dbusReloader { - return dbusReloader{ - serviceFilename: service, - } -} - -// dbusReloader applies a systemd unit reload to apply reloading. -type dbusReloader struct { - serviceFilename string -} - -// Reload reloads a systemd unit. -func (r dbusReloader) Reload() error { - ctx := context.Background() - dbc, err := dbus.NewWithContext(ctx) - if err != nil { - return fmt.Errorf("unable to connect to dbus: %w", err) - } - defer dbc.Close() - - c := make(chan string) - _, err = dbc.ReloadUnitContext(ctx, r.serviceFilename, "replace", c) - - if err != nil { - return err - } - - job := <-c - if job != done { - return fmt.Errorf("reloading failed %s", job) - } - - return nil -} diff --git a/pkg/net/validator.go b/pkg/net/validator.go deleted file mode 100644 index bf2f67f..0000000 --- a/pkg/net/validator.go +++ /dev/null @@ -1,6 +0,0 @@ -package net - -// Validator is an interface to apply common validation. -type Validator interface { - Validate() error -} diff --git a/pkg/network/chrony.go b/pkg/network/chrony.go deleted file mode 100644 index 882fdc6..0000000 --- a/pkg/network/chrony.go +++ /dev/null @@ -1,40 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - - "github.com/metal-stack/os-installer/pkg/exec" -) - -// chronyServiceEnabler can enable chrony systemd service for the given VRF. -type chronyServiceEnabler struct { - vrf string - log *slog.Logger -} - -// newChronyServiceEnabler constructs a new instance of this type. -func newChronyServiceEnabler(kb config) (chronyServiceEnabler, error) { - vrf, err := kb.getDefaultRouteVRFName() - return chronyServiceEnabler{ - vrf: vrf, - log: kb.log, - }, err -} - -// Enable enables chrony systemd service for the given VRF to be started after boot. 
-func (c chronyServiceEnabler) Enable() error { - cmd := fmt.Sprintf("systemctl enable chrony@%s", c.vrf) - c.log.Info("enable chrony", "command", cmd) - - return exec.NewVerboseCmd("bash", "-c", cmd).Run() -} - -func containsDefaultRoute(prefixes []string) bool { - for _, prefix := range prefixes { - if prefix == IPv4ZeroCIDR || prefix == IPv6ZeroCIDR { - return true - } - } - return false -} diff --git a/pkg/network/chrony_test.go b/pkg/network/chrony_test.go deleted file mode 100644 index 10bb313..0000000 --- a/pkg/network/chrony_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package network - -import ( - "testing" - - "github.com/metal-stack/metal-go/api/models" - mn "github.com/metal-stack/metal-lib/pkg/net" - apiv1 "github.com/metal-stack/os-installer/api/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestChronyServiceEnabler_Enable(t *testing.T) { - vrf := int64(104009) - external := mn.External - network := &models.V1MachineNetwork{Networktype: &external, Destinationprefixes: []string{IPv4ZeroCIDR}, Vrf: &vrf} - tests := []struct { - kb config - vrf string - isErrorExpected bool - }{ - { - kb: config{InstallerConfig: apiv1.InstallerConfig{Networks: []*models.V1MachineNetwork{network}}}, - vrf: "vrf104009", - isErrorExpected: false, - }, - { - kb: config{InstallerConfig: apiv1.InstallerConfig{Networks: []*models.V1MachineNetwork{}}}, - vrf: "", - isErrorExpected: true, - }, - } - - for _, tt := range tests { - e, err := newChronyServiceEnabler(tt.kb) - if tt.isErrorExpected { - require.Error(t, err) - } else { - require.NoError(t, err) - } - assert.Equal(t, tt.vrf, e.vrf) - } -} diff --git a/pkg/network/configurator.go b/pkg/network/configurator.go deleted file mode 100644 index 6fea055..0000000 --- a/pkg/network/configurator.go +++ /dev/null @@ -1,298 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "os" - "path" - "text/template" - - "github.com/metal-stack/os-installer/pkg/exec" - 
"github.com/metal-stack/os-installer/pkg/net" -) - -// BareMetalType defines the type of configuration to apply. -type BareMetalType int - -const ( - // Firewall defines the bare metal server to function as firewall. - Firewall BareMetalType = iota - // Machine defines the bare metal server to function as machine. - Machine -) -const ( - // fileModeSystemd represents a file mode that allows systemd to read e.g. /etc/systemd/network files. - fileModeSystemd = 0644 - // fileModeSixFourFour represents file mode 0644 - fileModeSixFourFour = 0644 - // fileModeDefault represents the default file mode sufficient e.g. to /etc/network/interfaces or /etc/frr.conf. - fileModeDefault = 0600 - // systemdUnitPath is the path where systemd units will be generated. - systemdUnitPath = "/etc/systemd/system/" -) - -var ( - // systemdNetworkPath is the path where systemd-networkd expects its configuration files. - systemdNetworkPath = "/etc/systemd/network" - // tmpPath is the path where temporary files are stored for validation before they are moved to their intended place. - tmpPath = "/etc/metal/networker/" -) - -// ForwardPolicy defines how packets in the forwarding chain are handled, can be either drop or accept. -// drop will be the standard for firewalls which are not managed by kubernetes resources (CWNPs) -type ForwardPolicy string - -const ( - // ForwardPolicyDrop drops packets which try to go through the forwarding chain - ForwardPolicyDrop = ForwardPolicy("drop") - // ForwardPolicyAccept accepts packets which try to go through the forwarding chain - ForwardPolicyAccept = ForwardPolicy("accept") -) - -type ( - // Configurator is an interface to configure bare metal servers. - Configurator interface { - Configure(forwardPolicy ForwardPolicy) - ConfigureNftables(forwardPolicy ForwardPolicy) - } - - // machineConfigurator is a configurator that configures a bare metal server as 'machine'. 
- machineConfigurator struct { - c config - } - - // firewallConfigurator is a configurator that configures a bare metal server as 'firewall'. - firewallConfigurator struct { - c config - enableDNSProxy bool - } -) - -type unitConfiguration struct { - unit string - templateFile string - constructApplier func(kb config, v serviceValidator) (net.Applier, error) - enabled bool -} - -// NewConfigurator creates a new configurator. -func NewConfigurator(kind BareMetalType, c config, enableDNS bool) (Configurator, error) { - switch kind { - case Firewall: - return firewallConfigurator{ - c: c, - enableDNSProxy: enableDNS, - }, nil - case Machine: - return machineConfigurator{ - c: c, - }, nil - default: - return nil, fmt.Errorf("unknown type:%d", kind) - } -} - -// Configure applies configuration to a bare metal server to function as 'machine'. -func (mc machineConfigurator) Configure(forwardPolicy ForwardPolicy) { - applyCommonConfiguration(mc.c.log, Machine, mc.c) -} - -// ConfigureNftables is empty function that exists just to satisfy the Configurator interface -func (mc machineConfigurator) ConfigureNftables(forwardPolicy ForwardPolicy) {} - -// Configure applies configuration to a bare metal server to function as 'firewall'. 
-func (fc firewallConfigurator) Configure(forwardPolicy ForwardPolicy) { - kb := fc.c - applyCommonConfiguration(fc.c.log, Firewall, kb) - - fc.ConfigureNftables(forwardPolicy) - - chrony, err := newChronyServiceEnabler(fc.c) - if err != nil { - fc.c.log.Warn("failed to configure chrony", "error", err) - } else { - err := chrony.Enable() - if err != nil { - fc.c.log.Error("enabling chrony failed", "error", err) - } - } - - for _, u := range fc.getUnits() { - src := mustTmpFile(u.unit) - validatorService := serviceValidator{src} - nfe, err := u.constructApplier(fc.c, validatorService) - - if err != nil { - fc.c.log.Warn("failed to deploy", "unit", u.unit, "error", err) - } - - applyAndCleanUp(fc.c.log, nfe, u.templateFile, src, path.Join(systemdUnitPath, u.unit), fileModeSystemd, false) - - if u.enabled { - mustEnableUnit(fc.c.log, u.unit) - } - } - - src := mustTmpFile("suricata_") - applier, err := newSuricataDefaultsApplier(kb, src) - - if err != nil { - fc.c.log.Warn("failed to configure suricata defaults", "error", err) - } - - applyAndCleanUp(fc.c.log, applier, tplSuricataDefaults, src, "/etc/default/suricata", fileModeSixFourFour, false) - - src = mustTmpFile("suricata.yaml_") - applier, err = newSuricataConfigApplier(kb, src) - - if err != nil { - fc.c.log.Warn("failed to configure suricata", "error", err) - } - - applyAndCleanUp(fc.c.log, applier, tplSuricataConfig, src, "/etc/suricata/suricata.yaml", fileModeSixFourFour, false) -} - -func (fc firewallConfigurator) ConfigureNftables(forwardPolicy ForwardPolicy) { - src := mustTmpFile("nftrules_") - validator := NftablesValidator{ - path: src, - log: fc.c.log, - } - applier := newNftablesConfigApplier(fc.c, validator, fc.enableDNSProxy, forwardPolicy) - applyAndCleanUp(fc.c.log, applier, TplNftables, src, "/etc/nftables/rules", fileModeDefault, true) -} - -func (fc firewallConfigurator) getUnits() (units []unitConfiguration) { - units = []unitConfiguration{ - { - unit: systemdUnitDroptailer, - templateFile: 
tplDroptailer, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newDroptailerServiceApplier(kb, v) - }, - enabled: false, // will be enabled in the case of k8s deployments with ignition on first boot - }, - { - unit: systemdUnitFirewallController, - templateFile: tplFirewallController, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newFirewallControllerServiceApplier(kb, v) - }, - enabled: false, // will be enabled in the case of k8s deployments with ignition on first boot - }, - { - unit: systemdUnitNftablesExporter, - templateFile: tplNftablesExporter, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return NewNftablesExporterServiceApplier(kb, v) - }, - enabled: true, - }, - { - unit: systemdUnitNodeExporter, - templateFile: tplNodeExporter, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newNodeExporterServiceApplier(kb, v) - }, - enabled: true, - }, - { - unit: systemdUnitSuricataUpdate, - templateFile: tplSuricataUpdate, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newSuricataUpdateServiceApplier(kb, v) - }, - enabled: true, - }, - } - - if fc.c.VPN != nil { - units = append(units, unitConfiguration{ - unit: systemdUnitTailscaled, - templateFile: tplTailscaled, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newTailscaledServiceApplier(kb, v) - }, - enabled: true, - }, unitConfiguration{ - unit: systemdUnitTailscale, - templateFile: tplTailscale, - constructApplier: func(kb config, v serviceValidator) (net.Applier, error) { - return newTailscaleServiceApplier(kb, v) - }, - enabled: true, - }) - } - - return units -} - -func applyCommonConfiguration(log *slog.Logger, kind BareMetalType, kb config) { - a := newIfacesApplier(kind, kb) - a.Apply() - - src := mustTmpFile("hosts_") - applier := newHostsApplier(kb, src) - 
applyAndCleanUp(log, applier, tplHosts, src, "/etc/hosts", fileModeDefault, false) - - src = mustTmpFile("hostname_") - applier = newHostnameApplier(kb, src) - applyAndCleanUp(log, applier, tplHostname, src, "/etc/hostname", fileModeSixFourFour, false) - - src = mustTmpFile("frr_") - applier = NewFrrConfigApplier(kind, kb, src, nil) - tpl := TplFirewallFRR - - if kind == Machine { - tpl = TplMachineFRR - } - - applyAndCleanUp(log, applier, tpl, src, "/etc/frr/frr.conf", fileModeDefault, false) -} - -func applyAndCleanUp(log *slog.Logger, applier net.Applier, tpl, src, dest string, mode os.FileMode, reload bool) { - log.Info("rendering", "template", tpl, "destination", dest, "mode", mode) - file := mustReadTpl(tpl) - mustApply(applier, file, src, dest, reload) - - err := os.Chmod(dest, mode) - if err != nil { - log.Error("unable change mode", "file", dest, "mode", mode, "error", err) - } - - _ = os.Remove(src) -} - -func mustEnableUnit(log *slog.Logger, unit string) { - cmd := fmt.Sprintf("systemctl enable %s", unit) - log.Info("enable unit", "command", cmd) - - err := exec.NewVerboseCmd("bash", "-c", cmd).Run() - - if err != nil { - panic(err) - } -} - -func mustApply(applier net.Applier, tpl, src, dest string, reload bool) { - t := template.Must(template.New(src).Parse(tpl)) - _, err := applier.Apply(*t, src, dest, reload) - - if err != nil { - panic(err) - } -} - -func mustTmpFile(prefix string) string { - f, err := os.CreateTemp(tmpPath, prefix) - if err != nil { - panic(err) - } - - err = f.Close() - if err != nil { - panic(err) - } - - return f.Name() -} diff --git a/pkg/network/configurator_test.go b/pkg/network/configurator_test.go deleted file mode 100644 index 764afb2..0000000 --- a/pkg/network/configurator_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package network - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewConfigurator(t *testing.T) { - tests := []struct { - kind BareMetalType - 
expected any - }{ - { - kind: Firewall, - expected: firewallConfigurator{}, - }, - { - kind: Machine, - expected: machineConfigurator{}, - }, - } - - for _, tt := range tests { - actual, err := NewConfigurator(tt.kind, config{}, false) - require.NoError(t, err) - assert.IsType(t, tt.expected, actual) - } -} diff --git a/pkg/network/doc.go b/pkg/network/doc.go deleted file mode 100644 index 79fbeff..0000000 --- a/pkg/network/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -package network groups functionality to configure networking related resources. -*/ -package network diff --git a/pkg/network/droptailer.go b/pkg/network/droptailer.go deleted file mode 100644 index 7786d0f..0000000 --- a/pkg/network/droptailer.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/metal-stack/os-installer/pkg/net" -) - -// TplDroptailer is the name of the template for the droptailer service. -const tplDroptailer = "droptailer.service.tpl" - -// SystemdUnitDroptailer is the name of the systemd unit for the droptailer. -const systemdUnitDroptailer = "droptailer.service" - -// droptailerData contains the data to render the droptailer service template. -type droptailerData struct { - Comment string - TenantVrf string -} - -// newDroptailerServiceApplier constructs a new instance of this type. 
-func newDroptailerServiceApplier(kb config, v net.Validator) (net.Applier, error) { - tenantVrf, err := getTenantVRFName(kb) - if err != nil { - return nil, err - } - - data := droptailerData{Comment: versionHeader(kb.MachineUUID), TenantVrf: tenantVrf} - - return net.NewNetworkApplier(data, v, nil), nil -} - -func getTenantVRFName(kb config) (string, error) { - primary := kb.getPrivatePrimaryNetwork() - if primary.Vrf != nil && *primary.Vrf != 0 { - vrf := fmt.Sprintf("vrf%d", *primary.Vrf) - return vrf, nil - } - - return "", fmt.Errorf("there is no private tenant network") -} diff --git a/pkg/network/firewall_controller.go b/pkg/network/firewall_controller.go deleted file mode 100644 index 0f34da3..0000000 --- a/pkg/network/firewall_controller.go +++ /dev/null @@ -1,51 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/metal-stack/os-installer/pkg/net" -) - -// TplFirewallController is the name of the template for the firewall-policy-controller service. -const tplFirewallController = "firewall_controller.service.tpl" - -// SystemdUnitFirewallController is the name of the systemd unit for the firewall policy controller, -const systemdUnitFirewallController = "firewall-controller.service" - -// firewallControllerData contains the data to render the firewall-controller service template. -type firewallControllerData struct { - Comment string - DefaultRouteVrf string - ServiceIP string - PrivateVrfID int64 -} - -// newFirewallControllerServiceApplier constructs a new instance of this type. 
-func newFirewallControllerServiceApplier(kb config, v net.Validator) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - if len(kb.getPrivatePrimaryNetwork().Ips) == 0 { - return nil, fmt.Errorf("no private IP found useable for the firewall controller") - } - data := firewallControllerData{ - Comment: versionHeader(kb.MachineUUID), - DefaultRouteVrf: defaultRouteVrf, - } - - return net.NewNetworkApplier(data, v, nil), nil -} - -// serviceValidator holds information for systemd service validation. -type serviceValidator struct { - path string -} - -// Validate validates the service file. -func (v serviceValidator) Validate() error { - // Currently not implemented as systemd-analyze fails in the metal-hammer. - // Error: Cannot determine cgroup we are running in: No medium found - return nil -} diff --git a/pkg/network/frr.go b/pkg/network/frr.go deleted file mode 100644 index ac6073e..0000000 --- a/pkg/network/frr.go +++ /dev/null @@ -1,164 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "net/netip" - - "github.com/Masterminds/semver/v3" - "github.com/metal-stack/metal-go/api/models" - mn "github.com/metal-stack/metal-lib/pkg/net" - "github.com/metal-stack/os-installer/pkg/exec" - "github.com/metal-stack/os-installer/pkg/net" -) - -const ( - // FRRVersion holds a string that is used in the frr.conf to define the FRR version. - FRRVersion = "8.5" - // TplFirewallFRR defines the name of the template to render FRR configuration to a 'firewall'. - TplFirewallFRR = "frr.firewall.tpl" - // TplMachineFRR defines the name of the template to render FRR configuration to a 'machine'. - TplMachineFRR = "frr.machine.tpl" - // IPPrefixListSeqSeed specifies the initial value for prefix lists sequence number. - IPPrefixListSeqSeed = 100 - // IPPrefixListNoExportSuffix defines the suffix to use for private IP ranges that must not be exported. 
- IPPrefixListNoExportSuffix = "-no-export" - // RouteMapOrderSeed defines the initial value for route-map order. - RouteMapOrderSeed = 10 - // AddressFamilyIPv4 is the name for this address family for the routing daemon. - AddressFamilyIPv4 = "ip" - // AddressFamilyIPv6 is the name for this address family for the routing daemon. - AddressFamilyIPv6 = "ipv6" -) - -type ( - // CommonFRRData contains attributes that are common to FRR configuration of all kind of bare metal servers. - CommonFRRData struct { - ASN int64 - Comment string - FRRVersion string - Hostname string - RouterID string - } - - // MachineFRRData contains attributes required to render frr.conf of bare metal servers that function as 'machine'. - MachineFRRData struct { - CommonFRRData - } - - // FirewallFRRData contains attributes required to render frr.conf of bare metal servers that function as 'firewall'. - FirewallFRRData struct { - CommonFRRData - VRFs []VRF - } - - // frrValidator validates the frr.conf to apply. - frrValidator struct { - path string - log *slog.Logger - } - - // AddressFamily is the address family for the routing daemon. - AddressFamily string -) - -// NewFrrConfigApplier constructs a new Applier of the given type of Bare Metal. 
-func NewFrrConfigApplier(kind BareMetalType, c config, tmpFile string, frrVersion *semver.Version) net.Applier { - var data any - - switch kind { - case Firewall: - net := c.getUnderlayNetwork() - data = FirewallFRRData{ - CommonFRRData: CommonFRRData{ - FRRVersion: FRRVersion, - Hostname: c.Hostname, - Comment: versionHeader(c.MachineUUID), - ASN: *net.Asn, - RouterID: routerID(net), - }, - VRFs: assembleVRFs(c, frrVersion), - } - case Machine: - net := c.getPrivatePrimaryNetwork() - data = MachineFRRData{ - CommonFRRData: CommonFRRData{ - FRRVersion: FRRVersion, - Hostname: c.Hostname, - Comment: versionHeader(c.MachineUUID), - ASN: *net.Asn, - RouterID: routerID(net), - }, - } - default: - c.log.Error("unknown kind of bare metal", "kind", kind) - panic(fmt.Errorf("unknown kind %v", kind)) - } - - validator := frrValidator{ - path: tmpFile, - log: c.log, - } - - return net.NewNetworkApplier(data, validator, net.NewDBusReloader("frr.service")) -} - -// routerID will calculate the bgp router-id which must only be specified in the ipv6 range. -// returns 0.0.0.0 for erroneous ip addresses and 169.254.255.255 for ipv6 -// TODO prepare machine allocations with ipv6 primary address and tests -func routerID(net *models.V1MachineNetwork) string { - if len(net.Ips) < 1 { - return "0.0.0.0" - } - ip, err := netip.ParseAddr(net.Ips[0]) - if err != nil { - return "0.0.0.0" - } - if ip.Is4() { - return ip.String() - } - return "169.254.255.255" -} - -// Validate can be used to run validation on FRR configuration using vtysh. 
-func (v frrValidator) Validate() error { - vtysh := fmt.Sprintf("vtysh --dryrun --inputfile %s", v.path) - v.log.Info("validate changes", "command", vtysh) - - return exec.NewVerboseCmd("bash", "-c", vtysh, v.path).Run() -} - -func assembleVRFs(kb config, frrVersion *semver.Version) []VRF { - var ( - result []VRF - frr *FRR - ) - if frrVersion != nil { - frr = &FRR{ - Major: frrVersion.Major(), - Minor: frrVersion.Minor(), - } - } - - networks := kb.GetNetworks(mn.PrivatePrimaryUnshared, mn.PrivatePrimaryShared, mn.PrivateSecondaryShared, mn.External) - for _, network := range networks { - if network.Networktype == nil { - continue - } - - i := importRulesForNetwork(kb, network) - vrf := VRF{ - Identity: Identity{ - ID: int(*network.Vrf), - }, - VNI: int(*network.Vrf), - ImportVRFNames: i.ImportVRFs, - IPPrefixLists: i.prefixLists(), - RouteMaps: i.routeMaps(), - FRRVersion: frr, - } - result = append(result, vrf) - } - - return result -} diff --git a/pkg/network/frr_test.go b/pkg/network/frr_test.go deleted file mode 100644 index 999e128..0000000 --- a/pkg/network/frr_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package network - -import ( - "bytes" - "log/slog" - "os" - "testing" - - "github.com/Masterminds/semver/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFrrConfigApplier(t *testing.T) { - tests := []struct { - name string - input string - frrVersion *semver.Version - expectedOutput string - configuratorType BareMetalType - tpl string - }{ - { - name: "firewall of a shared private network", - input: "testdata/firewall_shared.yaml", - expectedOutput: "testdata/frr.conf.firewall_shared", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "standard firewall with private primary unshared network, private secondary shared network, internet and mpls", - input: "testdata/firewall.yaml", - expectedOutput: "testdata/frr.conf.firewall", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - 
name: "dmz firewall with private primary unshared network, private secondary shared dmz network, internet and mpls", - input: "testdata/firewall_dmz.yaml", - expectedOutput: "testdata/frr.conf.firewall_dmz", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "dmz firewall with private primary unshared network, private secondary shared dmz network", - input: "testdata/firewall_dmz_app.yaml", - expectedOutput: "testdata/frr.conf.firewall_dmz_app", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "firewall with private primary unshared network, private secondary shared dmz network and private secondary shared storage network", - input: "testdata/firewall_dmz_app_storage.yaml", - expectedOutput: "testdata/frr.conf.firewall_dmz_app_storage", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "firewall with private primary unshared ipv6 network, private secondary shared ipv4 network, ipv6 internet and ipv4 mpls", - input: "testdata/firewall_ipv6.yaml", - expectedOutput: "testdata/frr.conf.firewall_ipv6", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "firewall with private primary unshared ipv6 network, private secondary shared ipv4 network, dualstack internet and ipv4 mpls", - input: "testdata/firewall_dualstack.yaml", - expectedOutput: "testdata/frr.conf.firewall_dualstack", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "standard machine", - input: "testdata/machine.yaml", - expectedOutput: "testdata/frr.conf.machine", - configuratorType: Machine, - tpl: TplMachineFRR, - }, - { - name: "standard firewall with lower frr version", - input: "testdata/firewall.yaml", - frrVersion: semver.MustParse("9.0.5-0"), - expectedOutput: "testdata/frr.conf.firewall_frr-9", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - { - name: "standard firewall with higher frr version", - input: "testdata/firewall.yaml", - frrVersion: semver.MustParse("10.1.5"), - expectedOutput: 
"testdata/frr.conf.firewall_frr-10", - configuratorType: Firewall, - tpl: TplFirewallFRR, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - log := slog.Default() - kb, err := New(log, test.input) - require.NoError(t, err) - a := NewFrrConfigApplier(test.configuratorType, *kb, "", test.frrVersion) - b := bytes.Buffer{} - - tpl := MustParseTpl(test.tpl) - err = a.Render(&b, *tpl) - require.NoError(t, err) - - // eases adjustment of test fixtures - // just remove old test fixture after a code change - // let the new fixtures get generated - // check them manually before commit - if _, err := os.Stat(test.expectedOutput); os.IsNotExist(err) { - err = os.WriteFile(test.expectedOutput, b.Bytes(), fileModeDefault) - require.NoError(t, err) - return - } - - expected, err := os.ReadFile(test.expectedOutput) - require.NoError(t, err) - assert.Equal(t, string(expected), b.String()) - }) - } -} - -func TestFRRValidator_Validate(t *testing.T) { - validator := frrValidator{ - log: slog.Default(), - } - actual := validator.Validate() - require.Error(t, actual) -} diff --git a/pkg/network/hostname.go b/pkg/network/hostname.go deleted file mode 100644 index 7273f2c..0000000 --- a/pkg/network/hostname.go +++ /dev/null @@ -1,33 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplHostname defines the name of the template to render /etc/hostname. -const tplHostname = "hostname.tpl" - -type ( - // HostnameData contains attributes to render hostname file. - HostnameData struct { - Comment, Hostname string - } - - // HostnameValidator validates hostname changes. - HostnameValidator struct { - path string - } -) - -// newHostnameApplier creates a new Applier to render hostname. 
-func newHostnameApplier(kb config, tmpFile string) net.Applier { - data := HostnameData{Comment: versionHeader(kb.MachineUUID), Hostname: kb.Hostname} - validator := HostnameValidator{tmpFile} - - return net.NewNetworkApplier(data, validator, nil) -} - -// Validate validates hostname rendering. -func (v HostnameValidator) Validate() error { - return nil -} diff --git a/pkg/network/hostname_test.go b/pkg/network/hostname_test.go deleted file mode 100644 index 78f1f9e..0000000 --- a/pkg/network/hostname_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - "bytes" - "log/slog" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNameHostname(t *testing.T) { - expected, err := os.ReadFile("testdata/hostname") - require.NoError(t, err) - - log := slog.Default() - kb, err := New(log, "testdata/firewall.yaml") - require.NoError(t, err) - - a := newHostnameApplier(*kb, "") - b := bytes.Buffer{} - - tpl := MustParseTpl(tplHostname) - err = a.Render(&b, *tpl) - require.NoError(t, err) - assert.Equal(t, string(expected), b.String()) -} diff --git a/pkg/network/hosts.go b/pkg/network/hosts.go deleted file mode 100644 index 0ae1d42..0000000 --- a/pkg/network/hosts.go +++ /dev/null @@ -1,37 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplHosts defines the name of the template to render hosts file. -const tplHosts = "hosts.tpl" - -type ( - // HostsData contains data to render hosts file. - HostsData struct { - Comment string - Hostname string - IP string - } - - // HostsValidator validates hosts file. - HostsValidator struct { - path string - } -) - -// newHostsApplier creates a new hosts applier. 
-func newHostsApplier(kb config, tmpFile string) net.Applier { - data := HostsData{Hostname: kb.Hostname, Comment: versionHeader(kb.MachineUUID), IP: kb.getPrivatePrimaryNetwork().Ips[0]} - validator := HostsValidator{tmpFile} - - return net.NewNetworkApplier(data, validator, nil) -} - -// Validate validates hosts file. -func (v HostsValidator) Validate() error { - //nolint:godox - // FIXME: How do we validate a hosts file? - return nil -} diff --git a/pkg/network/hosts_test.go b/pkg/network/hosts_test.go deleted file mode 100644 index 233fc2d..0000000 --- a/pkg/network/hosts_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package network - -import ( - "bytes" - "log/slog" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewHostsApplier(t *testing.T) { - expected, err := os.ReadFile("testdata/hosts") - require.NoError(t, err) - - log := slog.Default() - kb, err := New(log, "testdata/firewall.yaml") - require.NoError(t, err) - a := newHostsApplier(*kb, "") - b := bytes.Buffer{} - - tpl := MustParseTpl(tplHosts) - err = a.Render(&b, *tpl) - require.NoError(t, err) - assert.Equal(t, string(expected), b.String()) -} diff --git a/pkg/network/interfaces.go b/pkg/network/interfaces.go deleted file mode 100644 index 81b8d6e..0000000 --- a/pkg/network/interfaces.go +++ /dev/null @@ -1,165 +0,0 @@ -package network - -import ( - "fmt" - "io" - "log/slog" - "net/netip" - "text/template" - - mn "github.com/metal-stack/metal-lib/pkg/net" -) - -type ( - // IfacesData contains attributes required to render network interfaces configuration of a bare metal - // server. - IfacesData struct { - Comment string - Loopback Loopback - EVPNIfaces []EVPNIface - } -) - -// ifacesApplier applies interfaces configuration. -type ifacesApplier struct { - kind BareMetalType - kb config - data IfacesData -} - -// newIfacesApplier constructs a new instance of this type. 
-func newIfacesApplier(kind BareMetalType, c config) ifacesApplier { - d := IfacesData{ - Comment: versionHeader(c.MachineUUID), - } - - switch kind { - case Firewall: - underlay := c.getUnderlayNetwork() - d.Loopback.Comment = fmt.Sprintf("# networkid: %s", *underlay.Networkid) - d.Loopback.IPs = addBitlen(underlay.Ips) - d.EVPNIfaces = getEVPNIfaces(c) - case Machine: - private := c.getPrivatePrimaryNetwork() - d.Loopback.Comment = fmt.Sprintf("# networkid: %s", *private.Networkid) - // Ensure that the ips of the private network are the first ips at the loopback interface. - // The first lo IP is used within network communication and other systems depend on seeing the first private ip. - d.Loopback.IPs = addBitlen(append(private.Ips, c.CollectIPs(mn.External)...)) - default: - c.log.Error("unknown configuratorType", "kind", kind) - panic(fmt.Errorf("unknown configurator type:%v", kind)) - } - - return ifacesApplier{kind: kind, kb: c, data: d} -} - -func addBitlen(ips []string) []string { - ipsWithMask := []string{} - for _, ip := range ips { - parsedIP, err := netip.ParseAddr(ip) - if err != nil { - continue - } - ipWithMask := fmt.Sprintf("%s/%d", ip, parsedIP.BitLen()) - ipsWithMask = append(ipsWithMask, ipWithMask) - } - return ipsWithMask -} - -// Render renders the network interfaces to the given writer using the given template. -func (a *ifacesApplier) Render(w io.Writer, tpl template.Template) error { - return tpl.Execute(w, a.data) -} - -// Apply applies the interface configuration with systemd-networkd. 
-func (a *ifacesApplier) Apply() { - uuid := a.kb.MachineUUID - evpnIfaces := a.data.EVPNIfaces - - // /etc/systemd/network/00 loopback - src := mustTmpFile("lo_network_") - applier := newSystemdNetworkdApplier(src, a.data) - dest := fmt.Sprintf("%s/00-lo.network", systemdNetworkPath) - applyAndCleanUp(a.kb.log, applier, tplSystemdNetworkLo, src, dest, fileModeSystemd, false) - - // /etc/systemd/network/1x* lan interfaces - offset := 10 - for i, nic := range a.kb.Nics { - prefix := fmt.Sprintf("lan%d_link_", i) - src := mustTmpFile(prefix) - applier, err := newSystemdLinkApplier(a.kind, uuid, i, nic, src, evpnIfaces) - if err != nil { - a.kb.log.Error("unable to create systemdlinkapplier", "error", err) - panic(err) - } - dest := fmt.Sprintf("%s/%d-lan%d.link", systemdNetworkPath, offset+i, i) - applyAndCleanUp(a.kb.log, applier, tplSystemdLinkLan, src, dest, fileModeSystemd, false) - - prefix = fmt.Sprintf("lan%d_network_", i) - src = mustTmpFile(prefix) - applier, err = newSystemdLinkApplier(a.kind, uuid, i, nic, src, evpnIfaces) - if err != nil { - a.kb.log.Error("unable to create systemdlinkapplier", "error", err) - panic(err) - } - dest = fmt.Sprintf("%s/%d-lan%d.network", systemdNetworkPath, offset+i, i) - applyAndCleanUp(a.kb.log, applier, tplSystemdNetworkLan, src, dest, fileModeSystemd, false) - } - - if a.kind == Machine { - return - } - - // /etc/systemd/network/20 bridge interface - applyNetdevAndNetwork(a.kb.log, 20, 20, "bridge", "", a.data) - - // /etc/systemd/network/3x* triplet of interfaces for a tenant: vrf, svi, vxlan - offset = 30 - for i, tenant := range a.data.EVPNIfaces { - suffix := fmt.Sprintf("-%d", tenant.VRF.ID) - applyNetdevAndNetwork(a.kb.log, offset, offset+i, "vrf", suffix, tenant) - applyNetdevAndNetwork(a.kb.log, offset, offset+i, "svi", suffix, tenant) - applyNetdevAndNetwork(a.kb.log, offset, offset+i, "vxlan", suffix, tenant) - } -} - -func applyNetdevAndNetwork(log *slog.Logger, si, di int, prefix, suffix string, data any) { - 
src := mustTmpFile(prefix + "_netdev_") - applier := newSystemdNetworkdApplier(src, data) - dest := fmt.Sprintf("%s/%d-%s%s.netdev", systemdNetworkPath, di, prefix, suffix) - tpl := fmt.Sprintf("networkd/%d-%s.netdev.tpl", si, prefix) - applyAndCleanUp(log, applier, tpl, src, dest, fileModeSystemd, false) - - src = mustTmpFile(prefix + "_network_") - applier = newSystemdNetworkdApplier(src, data) - dest = fmt.Sprintf("%s/%d-%s%s.network", systemdNetworkPath, di, prefix, suffix) - tpl = fmt.Sprintf("networkd/%d-%s.network.tpl", si, prefix) - applyAndCleanUp(log, applier, tpl, src, dest, fileModeSystemd, false) -} - -func getEVPNIfaces(kb config) []EVPNIface { - var result []EVPNIface - - vrfTableOffset := 1000 - for i, n := range kb.Networks { - if n.Underlay != nil && *n.Underlay { - continue - } - - vrf := int(*n.Vrf) - e := EVPNIface{} - e.Comment = versionHeader(kb.MachineUUID) - e.SVI.Comment = fmt.Sprintf("# svi (networkid: %s)", *n.Networkid) - e.SVI.VLANID = VLANOffset + i - e.SVI.Addresses = addBitlen(n.Ips) - e.VXLAN.Comment = fmt.Sprintf("# vxlan (networkid: %s)", *n.Networkid) - e.VXLAN.ID = vrf - e.VXLAN.TunnelIP = kb.getUnderlayNetwork().Ips[0] - e.VRF.Comment = fmt.Sprintf("# vrf (networkid: %s)", *n.Networkid) - e.VRF.ID = vrf - e.VRF.Table = vrfTableOffset + i - result = append(result, e) - } - - return result -} diff --git a/pkg/network/interfaces_test.go b/pkg/network/interfaces_test.go deleted file mode 100644 index 594d83c..0000000 --- a/pkg/network/interfaces_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "os" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" -) - -func TestIfacesApplier(t *testing.T) { - tests := []struct { - input string - expectedOutput string - configuratorType BareMetalType - }{ - { - input: "testdata/firewall.yaml", - expectedOutput: "testdata/networkd/firewall", - configuratorType: Firewall, - }, - { - input: 
"testdata/machine.yaml", - expectedOutput: "testdata/networkd/machine", - configuratorType: Machine, - }, - } - log := slog.Default() - - tmpPath = os.TempDir() - for _, tc := range tests { - func() { - old := systemdNetworkPath - tempdir, err := os.MkdirTemp(os.TempDir(), "networkd*") - require.NoError(t, err) - systemdNetworkPath = tempdir - defer func() { - _ = os.RemoveAll(systemdNetworkPath) - systemdNetworkPath = old - }() - kb, err := New(log, tc.input) - require.NoError(t, err) - a := newIfacesApplier(tc.configuratorType, *kb) - a.Apply() - if equal, s := equalDirs(systemdNetworkPath, tc.expectedOutput); !equal { - t.Error(s) - } - }() - } -} - -func equalDirs(dir1, dir2 string) (bool, string) { - files1 := list(dir1) - files2 := list(dir2) - if !cmp.Equal(files1, files2) { - return false, fmt.Sprintf("list of files is different: %v", cmp.Diff(files1, files2)) - } - - for _, f := range files1 { - f1, err := os.ReadFile(fmt.Sprintf("%s/%s", dir1, f)) - if err != nil { - panic(err) - } - f2, err := os.ReadFile(fmt.Sprintf("%s/%s", dir2, f)) - if err != nil { - panic(err) - } - s1 := string(f1) - s2 := string(f2) - if !cmp.Equal(s1, s2) { - return false, fmt.Sprintf("file %s differs: %v", f, cmp.Diff(s1, s2)) - } - } - return true, "" -} - -func list(dir string) []string { - f, err := os.Open(dir) - if err != nil { - panic(err) - } - finfos, err := f.Readdir(-1) - _ = f.Close() - if err != nil { - panic(err) - } - files := []string{} - for _, file := range finfos { - files = append(files, file.Name()) - } - sort.Strings(files) - return files -} diff --git a/pkg/network/knowledgebase.go b/pkg/network/knowledgebase.go deleted file mode 100644 index 2f2ff57..0000000 --- a/pkg/network/knowledgebase.go +++ /dev/null @@ -1,249 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "log/slog" - "net" - "os" - "slices" - - apiv1 "github.com/metal-stack/os-installer/api/v1" - - "github.com/metal-stack/metal-go/api/models" - mn 
"github.com/metal-stack/metal-lib/pkg/net" - "github.com/metal-stack/v" - - "gopkg.in/yaml.v3" -) - -const ( - // VLANOffset defines a number to start with when creating new VLAN IDs. - VLANOffset = 1000 -) - -type ( - // config was generated with: https://mengzhuo.github.io/yaml-to-go/. - // It represents the input yaml that is needed to render network configuration files. - config struct { - apiv1.InstallerConfig - log *slog.Logger - } -) - -// New creates a new instance of this type. -func New(log *slog.Logger, path string) (*config, error) { - log.Info("loading", "path", path) - - f, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - installer := &apiv1.InstallerConfig{} - err = yaml.Unmarshal(f, &installer) - - if err != nil { - return nil, err - } - - return &config{ - InstallerConfig: *installer, - log: log, - }, nil -} - -// Validate validates the containing information depending on the demands of the bare metal type. -func (c config) Validate(kind BareMetalType) error { - if len(c.Networks) == 0 { - return errors.New("expectation at least one network is present failed") - } - - if !c.containsSinglePrivatePrimary() { - return errors.New("expectation exactly one 'private: true' network is present failed") - } - - if kind == Firewall { - if !c.allNonUnderlayNetworksHaveNonZeroVRF() { - return errors.New("networks with 'underlay: false' must contain a value of 'vrf' as it is used for BGP") - } - - if !c.containsSingleUnderlay() { - return errors.New("expectation exactly one underlay network is present failed") - } - - if !c.containsAnyPublicNetwork() { - return errors.New("expectation at least one public network (private: false, " + - "underlay: false) is present failed") - } - - for _, net := range c.GetNetworks(mn.External) { - if len(net.Destinationprefixes) == 0 { - return errors.New("non-private, non-underlay networks must contain destination prefix(es) to make " + - "any sense of it") - } - } - - if c.isAnyNAT() && 
len(c.getPrivatePrimaryNetwork().Prefixes) == 0 { - return errors.New("private network must not lack prefixes since nat is required") - } - } - - net := c.getPrivatePrimaryNetwork() - - if kind == Firewall { - net = c.getUnderlayNetwork() - } - - if len(net.Ips) == 0 { - return errors.New("at least one IP must be present to be considered as LOOPBACK IP (" + - "'private: true' network IP for machine, 'underlay: true' network IP for firewall") - } - - if net.Asn != nil && *net.Asn <= 0 { - return errors.New("'asn' of private (machine) resp. underlay (firewall) network must not be missing") - } - - if len(c.Nics) == 0 { - return errors.New("at least one 'nics/nic' definition must be present") - } - - if !c.nicsContainValidMACs() { - return errors.New("each 'nic' definition must contain a valid 'mac'") - } - - return nil -} - -func (c config) containsAnyPublicNetwork() bool { - if len(c.GetNetworks(mn.External)) > 0 { - return true - } - return slices.ContainsFunc(c.Networks, isDMZNetwork) -} - -func (c config) containsSinglePrivatePrimary() bool { - return c.containsSingleNetworkOf(mn.PrivatePrimaryUnshared) != c.containsSingleNetworkOf(mn.PrivatePrimaryShared) -} - -func (c config) containsSingleUnderlay() bool { - return c.containsSingleNetworkOf(mn.Underlay) -} - -func (c config) containsSingleNetworkOf(t string) bool { - possibleNetworks := c.GetNetworks(t) - return len(possibleNetworks) == 1 -} - -// CollectIPs collects IPs of the given networks. -func (c config) CollectIPs(types ...string) []string { - var result []string - - networks := c.GetNetworks(types...) - for _, network := range networks { - result = append(result, network.Ips...) - } - - return result -} - -// GetNetworks returns all networks present. 
-func (c config) GetNetworks(types ...string) []*models.V1MachineNetwork { - var result []*models.V1MachineNetwork - - for _, t := range types { - for _, n := range c.Networks { - if n.Networktype == nil { - continue - } - if *n.Networktype == t { - result = append(result, n) - } - } - } - - return result -} - -func (c config) isAnyNAT() bool { - for _, net := range c.Networks { - if net.Nat != nil && *net.Nat { - return true - } - } - - return false -} - -func (c config) getPrivatePrimaryNetwork() *models.V1MachineNetwork { - return c.GetNetworks(mn.PrivatePrimaryUnshared, mn.PrivatePrimaryShared)[0] -} - -func (c config) getUnderlayNetwork() *models.V1MachineNetwork { - // Safe access since validation ensures there is exactly one. - return c.GetNetworks(mn.Underlay)[0] -} - -func (c config) GetDefaultRouteNetwork() *models.V1MachineNetwork { - externalNets := c.GetNetworks(mn.External) - for _, network := range externalNets { - if containsDefaultRoute(network.Destinationprefixes) { - return network - } - } - - privateSecondarySharedNets := c.GetNetworks(mn.PrivateSecondaryShared) - for _, network := range privateSecondarySharedNets { - if containsDefaultRoute(network.Destinationprefixes) { - return network - } - } - - return nil -} - -func (c config) getDefaultRouteVRFName() (string, error) { - if network := c.GetDefaultRouteNetwork(); network != nil { - return vrfNameOf(network), nil - } - - return "", fmt.Errorf("there is no network providing a default (0.0.0.0/0) route") -} - -func (c config) nicsContainValidMACs() bool { - for _, nic := range c.Nics { - if nic.Mac == nil || *nic.Mac == "" { - return false - } - - if _, err := net.ParseMAC(*nic.Mac); err != nil { - c.log.Error("invalid mac", "mac", *nic.Mac) - return false - } - } - - return true -} - -func (c config) allNonUnderlayNetworksHaveNonZeroVRF() bool { - for _, net := range c.Networks { - if net.Underlay != nil && *net.Underlay { - continue - } - - if net.Vrf != nil && *net.Vrf <= 0 { - return false 
- } - } - - return true -} - -func versionHeader(uuid string) string { - version := v.V.String() - if os.Getenv("GO_ENV") == "testing" { - version = "" - } - return fmt.Sprintf("# This file was auto generated for machine: '%s' by app version %s.\n# Do not edit.", - uuid, version) -} diff --git a/pkg/network/knowledgebase_test.go b/pkg/network/knowledgebase_test.go deleted file mode 100644 index 90e707f..0000000 --- a/pkg/network/knowledgebase_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "testing" - - "github.com/metal-stack/metal-go/api/models" - mn "github.com/metal-stack/metal-lib/pkg/net" - apiv1 "github.com/metal-stack/os-installer/api/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func mustNewKnowledgeBase(t *testing.T) config { - log := slog.Default() - - d, err := New(log, "testdata/firewall.yaml") - require.NoError(t, err) - assert.NotNil(t, d) - - return *d -} - -func TestNewKnowledgeBase(t *testing.T) { - - d := mustNewKnowledgeBase(t) - - assert.Equal(t, "firewall", d.Hostname) - assert.NotEmpty(t, d.Networks) - assert.Len(t, d.Networks, 5) - - // private network - n := d.Networks[0] - assert.Len(t, n.Ips, 1) - assert.Equal(t, "10.0.16.2", n.Ips[0]) - assert.Len(t, n.Prefixes, 1) - assert.Equal(t, "10.0.16.0/22", n.Prefixes[0]) - assert.True(t, *n.Private) - assert.Equal(t, mn.PrivatePrimaryUnshared, *n.Networktype) - assert.Equal(t, int64(3981), *n.Vrf) - - // private shared network - n = d.Networks[1] - assert.Len(t, n.Ips, 1) - assert.Equal(t, "10.0.18.2", n.Ips[0]) - assert.Len(t, n.Prefixes, 1) - assert.Equal(t, "10.0.18.0/22", n.Prefixes[0]) - assert.True(t, *n.Private) - assert.Equal(t, mn.PrivateSecondaryShared, *n.Networktype) - assert.Equal(t, int64(3982), *n.Vrf) - - // public networks - n = d.Networks[2] - assert.Len(t, n.Destinationprefixes, 1) - assert.Equal(t, IPv4ZeroCIDR, n.Destinationprefixes[0]) - assert.Len(t, n.Ips, 1) - assert.Equal(t, 
"185.1.2.3", n.Ips[0]) - assert.Len(t, n.Prefixes, 2) - assert.Equal(t, "185.1.2.0/24", n.Prefixes[0]) - assert.Equal(t, "185.27.0.0/22", n.Prefixes[1]) - assert.False(t, *n.Underlay) - assert.False(t, *n.Private) - assert.True(t, *n.Nat) - assert.Equal(t, mn.External, *n.Networktype) - assert.Equal(t, int64(104009), *n.Vrf) - - // underlay network - n = d.Networks[3] - assert.Equal(t, int64(4200003073), *n.Asn) - assert.Len(t, n.Ips, 1) - assert.Equal(t, "10.1.0.1", n.Ips[0]) - assert.Len(t, n.Prefixes, 1) - assert.Equal(t, "10.0.12.0/22", n.Prefixes[0]) - assert.True(t, *n.Underlay) - assert.Equal(t, mn.Underlay, *n.Networktype) - - // public network mpls - n = d.Networks[4] - assert.Len(t, n.Destinationprefixes, 1) - assert.Equal(t, "100.127.1.0/24", n.Destinationprefixes[0]) - assert.Len(t, n.Ips, 1) - assert.Equal(t, "100.127.129.1", n.Ips[0]) - assert.Len(t, n.Prefixes, 1) - assert.Equal(t, "100.127.129.0/24", n.Prefixes[0]) - assert.False(t, *n.Underlay) - assert.False(t, *n.Private) - assert.True(t, *n.Nat) - assert.Equal(t, mn.External, *n.Networktype) - assert.Equal(t, int64(104010), *n.Vrf) -} - -var ( - boolTrue = true - boolFalse = false - asn0 = int64(0) - asn1 = int64(1011209) - vrf0 = int64(0) - vrf1 = int64(1011209) -) - -func stubKnowledgeBase() config { - privateNetID := "private" - underlayNetID := "underlay" - mac := "00:00:00:00:00:00" - privatePrimaryUnshared := mn.PrivatePrimaryUnshared - underlay := mn.Underlay - external := mn.External - log := slog.Default() - - return config{ - InstallerConfig: apiv1.InstallerConfig{ - Networks: []*models.V1MachineNetwork{ - {Private: &boolTrue, Networktype: &privatePrimaryUnshared, Ips: []string{"10.0.0.1"}, Asn: &asn1, Vrf: &vrf1, Networkid: &privateNetID}, - {Underlay: &boolTrue, Networktype: &underlay, Ips: []string{"10.0.0.1"}, Asn: &asn1, Vrf: &vrf0, Networkid: &underlayNetID}, - {Private: &boolFalse, Networktype: &external, Underlay: &boolFalse, Destinationprefixes: []string{"10.0.0.1/24"}, Asn: 
&asn1, Vrf: &vrf1, Networkid: &underlayNetID}, - }, - Nics: []*models.V1MachineNic{ - { - Mac: &mac}, - }, - }, - log: log, - } -} - -func TestKnowledgeBase_Validate(t *testing.T) { - tests := []struct { - expectedErrMsg string - kb config - kinds []BareMetalType - }{{ - expectedErrMsg: "", - kb: stubKnowledgeBase(), - kinds: []BareMetalType{Firewall, Machine}, - }, - { - expectedErrMsg: "expectation at least one network is present failed", - kb: stripNetworks(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall, Machine}, - }, - { - expectedErrMsg: "at least one IP must be present to be considered as LOOPBACK IP (" + - "'private: true' network IP for machine, 'underlay: true' network IP for firewall", - kb: stripIPs(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall, Machine}, - }, - {expectedErrMsg: "expectation exactly one underlay network is present failed", - kb: maskUnderlayNetworks(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall}}, - {expectedErrMsg: "expectation exactly one 'private: true' network is present failed", - kb: maskPrivatePrimaryNetworks(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall, Machine}}, - {expectedErrMsg: "'asn' of private (machine) resp. underlay (firewall) network must not be missing", - kb: stripPrivateNetworkASN(stubKnowledgeBase()), - kinds: []BareMetalType{Machine}}, - {expectedErrMsg: "'asn' of private (machine) resp. 
underlay (firewall) network must not be missing", - kb: stripUnderlayNetworkASN(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall}}, - {expectedErrMsg: "at least one 'nics/nic' definition must be present", - kb: stripNICs(stubKnowledgeBase()), - kinds: []BareMetalType{Machine}}, - {expectedErrMsg: "each 'nic' definition must contain a valid 'mac'", - kb: stripMACs(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall, Machine}}, - {expectedErrMsg: "private network must not lack prefixes since nat is required", - kb: setupIllegalNat(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall}}, - {expectedErrMsg: "non-private, non-underlay networks must contain destination prefix(es) to make any sense of it", - kb: stripDestinationPrefixesFromPublicNetworks(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall}}, - {expectedErrMsg: "networks with 'underlay: false' must contain a value of 'vrf' as it is used for BGP", - kb: stripVRFValueOfNonUnderlayNetworks(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall}}, - {expectedErrMsg: "each 'nic' definition must contain a valid 'mac'", - kb: unlegalizeMACs(stubKnowledgeBase()), - kinds: []BareMetalType{Firewall, Machine}}, - } - - for i, test := range tests { - for _, kind := range test.kinds { - t.Run(fmt.Sprintf("testcase %d - kind %v", i, kind), func(t *testing.T) { - actualErr := test.kb.Validate(kind) - if test.expectedErrMsg == "" { - require.NoError(t, actualErr) - return - } - require.EqualError(t, actualErr, test.expectedErrMsg, "expected error: %s", test.expectedErrMsg) - }) - } - } -} - -func stripVRFValueOfNonUnderlayNetworks(kb config) config { - for i := 0; i < len(kb.Networks); i++ { - // underlay runs in default vrf and no name is required - if kb.Networks[i].Underlay != nil && *kb.Networks[i].Underlay { - continue - } - vrf := int64(0) - kb.Networks[i].Vrf = &vrf - } - return kb -} - -// It makes no sense to have an public network without destination prefixes. 
-// Destination prefixes are used to import routes from the public network. -// Without route import there is no communication into that public network. -func stripDestinationPrefixesFromPublicNetworks(kb config) config { - kb.Networks[0].Nat = &boolTrue - for i := 0; i < len(kb.Networks); i++ { - if kb.Networks[i].Underlay != nil && !*kb.Networks[i].Underlay && kb.Networks[i].Private != nil && !*kb.Networks[i].Private { - kb.Networks[i].Destinationprefixes = []string{} - } - } - return kb -} - -func setupIllegalNat(kb config) config { - kb.Networks[0].Nat = &boolTrue - for i := 0; i < len(kb.Networks); i++ { - if kb.Networks[i].Private != nil && *kb.Networks[i].Private { - kb.Networks[i].Prefixes = []string{} - } - } - return kb -} - -func unlegalizeMACs(kb config) config { - mac := "1:2.3" - for i := 0; i < len(kb.Nics); i++ { - kb.Nics[i].Mac = &mac - } - return kb -} - -func stripMACs(kb config) config { - mac := "" - for i := 0; i < len(kb.Nics); i++ { - kb.Nics[i].Mac = &mac - } - return kb -} - -func stripNICs(kb config) config { - kb.Nics = []*models.V1MachineNic{} - return kb -} - -func stripUnderlayNetworkASN(kb config) config { - for i := 0; i < len(kb.Networks); i++ { - if kb.Networks[i].Underlay != nil && *kb.Networks[i].Underlay { - kb.Networks[i].Asn = &asn0 - } - } - return kb -} - -func stripPrivateNetworkASN(kb config) config { - for i := 0; i < len(kb.Networks); i++ { - if kb.Networks[i].Private != nil && *kb.Networks[i].Private { - kb.Networks[i].Asn = &asn0 - } - } - return kb -} - -func stripIPs(kb config) config { - for i := 0; i < len(kb.Networks); i++ { - kb.Networks[i].Ips = []string{} - } - return kb -} - -func stripNetworks(kb config) config { - kb.Networks = []*models.V1MachineNetwork{} - return kb -} - -func maskUnderlayNetworks(kb config) config { - privateSecondary := mn.PrivateSecondaryShared - for i, n := range kb.Networks { - if n.Networktype != nil && *n.Networktype == mn.Underlay { - kb.Networks[i].Underlay = &boolFalse - 
kb.Networks[i].Networktype = &privateSecondary - // avoid to run into validation error for absent vrf - kb.Networks[i].Vrf = &vrf1 - } - } - return kb -} - -func maskPrivatePrimaryNetworks(kb config) config { - privateUnshared := mn.PrivatePrimaryUnshared - for i := range kb.Networks { - kb.Networks[i].Networktype = &privateUnshared - } - return kb -} diff --git a/pkg/network/netobjects.go b/pkg/network/netobjects.go deleted file mode 100644 index 3393d26..0000000 --- a/pkg/network/netobjects.go +++ /dev/null @@ -1,100 +0,0 @@ -package network - -const ( - // IPv4ZeroCIDR is the CIDR block for the whole IPv4 address space - IPv4ZeroCIDR = "0.0.0.0/0" - - // IPv6ZeroCIDR is the CIDR block for the whole IPv6 address space - IPv6ZeroCIDR = "::/0" - // Permit defines an access policy that allows access. - Permit AccessPolicy = iota - // Deny defines an access policy that forbids access. - Deny -) - -type ( - // AccessPolicy is a type that represents a policy to manage access roles. - AccessPolicy int - - // Identity represents an object's identity. - Identity struct { - Comment string - ID int - } - - // Loopback represents a loopback interface (lo). - Loopback struct { - Comment string - IPs []string - } - - // VRF represents data required to render VRF information into frr.conf. - VRF struct { - Identity - Table int - VNI int - ImportVRFNames []string - IPPrefixLists []IPPrefixList - RouteMaps []RouteMap - FRRVersion *FRR - } - - FRR struct { - Major uint64 - Minor uint64 - } - // RouteMap represents a route-map to permit or deny routes. - RouteMap struct { - Name string - Entries []string - Policy string - Order int - } - - // IPPrefixList represents 'ip prefix-list' filtering mechanism to be used in combination with route-maps. - IPPrefixList struct { - Name string - Spec string - AddressFamily AddressFamily - // SourceVRF specifies from which VRF the given prefix list should be imported - SourceVRF string - } - - // SVI represents a switched virtual interface. 
- SVI struct { - VLANID int - Comment string - Addresses []string - } - - // VXLAN represents a VXLAN interface. - VXLAN struct { - Identity - TunnelIP string - } - - // EVPNIface represents the information required to render EVPN interfaces configuration. - EVPNIface struct { - Comment string - VRF VRF - SVI SVI - VXLAN VXLAN - } - - // Bridge represents a network bridge. - Bridge struct { - Ports string - Vids string - } -) - -func (p AccessPolicy) String() string { - switch p { - case Permit: - return "permit" - case Deny: - return "deny" - } - - return "undefined" -} diff --git a/pkg/network/network.go b/pkg/network/network.go new file mode 100644 index 0000000..d299e75 --- /dev/null +++ b/pkg/network/network.go @@ -0,0 +1,337 @@ +package network + +import ( + "fmt" + "net/netip" + + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/samber/lo" +) + +const ( + // mtuFirewall defines the value for MTU specific to the needs of a firewall. VXLAN requires higher MTU. + mtuFirewall = 9216 + // mtuMachine defines the value for MTU specific to the needs of a machine. 
+ mtuMachine = 9000 + + // ipv4ZeroCIDR is the CIDR block for the whole IPv4 address space + ipv4ZeroCIDR = "0.0.0.0/0" + // ipv6ZeroCIDR is the CIDR block for the whole IPv6 address space + ipv6ZeroCIDR = "::/0" +) + +type ( + Network struct { + allocation *apiv2.MachineAllocation + } + + EvpnIface struct { + Network string + CIDRs []string + VlanID int + VrfID uint64 + } +) + +func New(allocation *apiv2.MachineAllocation) *Network { + return &Network{ + allocation: allocation, + } +} + +func (n *Network) MTU() int { + if n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + return mtuFirewall + } + + return mtuMachine +} + +func (n *Network) Hostname() string { + return n.allocation.Hostname +} + +func (n *Network) IsMachine() bool { + return n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE +} + +func (n *Network) HasVpn() bool { + if n.allocation.Vpn != nil && n.allocation.Vpn.AuthKey != "" { + return true + } + return false +} + +func (n *Network) Vpn() *apiv2.MachineVPN { + if n.allocation.Vpn != nil && n.allocation.Vpn.AuthKey != "" { + return n.allocation.Vpn + } + return nil +} + +func (n *Network) AllocationNetworks() []*apiv2.MachineNetwork { + return n.allocation.Networks +} + +func (n *Network) FirewallRules() *apiv2.FirewallRules { + return n.allocation.FirewallRules +} + +func (n *Network) NTPServers() (ntpServers []string) { + for _, ntpserver := range n.allocation.NtpServers { + ntpServers = append(ntpServers, ntpserver.Address) + } + return +} + +func (n *Network) LoopbackCIDRs() (cidrs []string, err error) { + var ips []string + + if n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + ips, err = loFirewallIps(n.allocation.Networks) + if err != nil { + return nil, err + } + } else { + ips, err = loMachineIps(n.allocation.Networks) + if err != nil { + return nil, err + } + } + + for _, ip := range ips { + addr, err := 
netip.ParseAddr(ip) + if err != nil { + return nil, err + } + + cidrs = append(cidrs, fmt.Sprintf("%s/%d", addr.String(), addr.BitLen())) + } + + return +} + +func (n *Network) UnderlayNetwork() (*apiv2.MachineNetwork, error) { + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_UNDERLAY { + return nw, nil + } + } + return nil, fmt.Errorf("no underlay network present in network allocation") +} + +func (n *Network) PrivatePrimaryNetwork() (*apiv2.MachineNetwork, error) { + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD { + return nw, nil + } + } + + for _, nw := range n.allocation.Networks { + if nw.Project == nil { + continue + } + + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED && *nw.Project == n.allocation.Project { + return nw, nil + } + } + + return nil, fmt.Errorf("no private primary network present in network allocation") +} + +func (n *Network) PrivateSecondarySharedNetworks() (nws []*apiv2.MachineNetwork) { + for _, nw := range n.allocation.Networks { + if nw.Project == nil { + continue + } + + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED && *nw.Project != n.allocation.Project { + nws = append(nws, nw) + } + } + + return +} + +func (n *Network) PrivatePrimaryIPs() ([]string, error) { + if n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_UNDERLAY { + return nw.Ips, nil + } + } + + return nil, fmt.Errorf("no private primary ip present in network allocation") + } + + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD { + return nw.Ips, nil + } + } + + for _, nw := range n.allocation.Networks { + if nw.Project == nil { + continue + } + + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED && *nw.Project == 
n.allocation.Project { + return nw.Ips, nil + } + } + + return nil, fmt.Errorf("no private primary ip present in network allocation") +} + +func (n *Network) PrivatePrimaryNetworksPrefixes() ([]string, error) { + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD { + return nw.Prefixes, nil + } + } + + for _, nw := range n.allocation.Networks { + if nw.Project == nil { + continue + } + + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED && *nw.Project == n.allocation.Project { + return nw.Prefixes, nil + } + } + + return nil, fmt.Errorf("no private primary networks present in network allocation") +} + +func (n *Network) VxlanIDs() (ids []uint64) { + if n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL { + for _, nw := range n.allocation.Networks { + if nw.Vrf > 0 { + ids = append(ids, nw.Vrf) + } + } + } + + ids = lo.Uniq(ids) + + return +} + +func (n *Network) EVPNIfaces() (ifaces []EvpnIface, err error) { + if n.allocation.AllocationType == apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE { + return nil, fmt.Errorf("no evpn interfaces supported on machines") + } + + const vlanOffset = 1000 + + for i, nw := range n.allocation.Networks { + if nw.Vrf > 0 { + var cidrs []string + + for _, ip := range nw.Ips { + addr, err := netip.ParseAddr(ip) + if err != nil { + return nil, err + } + + cidrs = append(cidrs, fmt.Sprintf("%s/%d", addr.String(), addr.BitLen())) + } + + ifaces = append(ifaces, EvpnIface{ + Network: nw.Network, + CIDRs: cidrs, + VlanID: vlanOffset + i, + VrfID: nw.Vrf, + }) + } + } + + ifaces = lo.UniqBy(ifaces, func(iface EvpnIface) uint64 { + return iface.VrfID + }) + + return +} + +func (n *Network) GetNetworks(networkType apiv2.NetworkType) []*apiv2.MachineNetwork { + var networks []*apiv2.MachineNetwork + for _, nw := range n.allocation.Networks { + if nw.NetworkType == networkType { + networks = append(networks, nw) + } + } + 
return networks +} + +func (n *Network) GetExternalNetworkVrfNames() (vrfNames []string) { + for _, nw := range n.allocation.Networks { + if nw.NetworkType != apiv2.NetworkType_NETWORK_TYPE_EXTERNAL { + continue + } + + vrfNames = append(vrfNames, fmt.Sprintf("vrf%d", nw.Vrf)) + } + + return +} + +func (n *Network) GetDefaultRouteNetwork() (*apiv2.MachineNetwork, error) { + for _, nw := range n.allocation.Networks { + if nw.NetworkType == apiv2.NetworkType_NETWORK_TYPE_EXTERNAL { + if ContainsDefaultRoute(nw.DestinationPrefixes) { + return nw, nil + } + } + } + return nil, fmt.Errorf("no network which provides a default route found") +} + +func (n *Network) GetDefaultRouteNetworkVrfName() (string, error) { + nw, err := n.GetDefaultRouteNetwork() + if err != nil { + return "", err + } + return fmt.Sprintf("vrf%d", nw.Vrf), nil +} + +func (n *Network) GetTenantNetworkVrfName() (string, error) { + nw, err := n.PrivatePrimaryNetwork() + if err != nil { + return "", err + } + return fmt.Sprintf("vrf%d", nw.Vrf), nil +} + +func ContainsDefaultRoute(prefixes []string) bool { + for _, prefix := range prefixes { + if prefix == ipv4ZeroCIDR || prefix == ipv6ZeroCIDR { + return true + } + } + return false +} + +func loFirewallIps(networks []*apiv2.MachineNetwork) (ips []string, err error) { + for _, nw := range networks { + switch nw.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_UNDERLAY: + ips = append(ips, nw.Ips...) + } + } + + return +} + +func loMachineIps(networks []*apiv2.MachineNetwork) (ips []string, err error) { + for _, nw := range networks { + switch nw.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_CHILD, apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED: + ips = append(ips, nw.Ips...) + case apiv2.NetworkType_NETWORK_TYPE_EXTERNAL: + ips = append(ips, nw.Ips...) 
		}
	}

	return
}
diff --git a/pkg/network/network_test.go b/pkg/network/network_test.go
new file mode 100644
index 0000000..2b66a7f
--- /dev/null
+++ b/pkg/network/network_test.go
@@ -0,0 +1,1903 @@
package network_test

import (
	"errors"
	"testing"

	"github.com/google/go-cmp/cmp"
	apiv2 "github.com/metal-stack/api/go/metalstack/api/v2"
	"github.com/metal-stack/os-installer/pkg/network"
	"github.com/metal-stack/os-installer/pkg/test"
	"github.com/stretchr/testify/require"
)

// TestNetwork_MTU verifies the MTU per allocation type: 9216 for firewalls,
// 9000 for machines and unspecified allocation types.
func TestNetwork_MTU(t *testing.T) {
	tests := []struct {
		name       string
		allocation *apiv2.MachineAllocation
		want       int
	}{
		{
			name:       "firewall",
			allocation: &apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL},
			want:       9216,
		},
		{
			name:       "machine",
			allocation: &apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE},
			want:       9000,
		},
		{
			name:       "unknown",
			allocation: &apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_UNSPECIFIED},
			want:       9000,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := network.New(tt.allocation)
			got := n.MTU()
			require.Equal(t, tt.want, got)
		})
	}
}

// TestNetwork_Hostname verifies that the hostname from the allocation is
// passed through unchanged, including the empty string.
func TestNetwork_Hostname(t *testing.T) {
	tests := []struct {
		name       string
		allocation *apiv2.MachineAllocation
		want       string
	}{
		{
			name:       "with hostname",
			allocation: &apiv2.MachineAllocation{Hostname: "metal"},
			want:       "metal",
		},
		{
			name:       "without hostname",
			allocation: &apiv2.MachineAllocation{Hostname: ""},
			want:       "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := network.New(tt.allocation)
			got := n.Hostname()
			require.Equal(t, tt.want, got)
		})
	}
}

// TestNetwork_IsMachine verifies that only the machine allocation type is
// reported as a machine.
func TestNetwork_IsMachine(t *testing.T) {
	tests := []struct {
		name       string
		allocation *apiv2.MachineAllocation
		want       bool
	}{
		{
			name: "firewall",
			allocation:
&apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL}, + want: false, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE}, + want: true, + }, + { + name: "unknown", + allocation: &apiv2.MachineAllocation{AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_UNSPECIFIED}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.IsMachine() + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_HasVpn(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want bool + }{ + { + name: "firewall with vpn", + allocation: &apiv2.MachineAllocation{Vpn: &apiv2.MachineVPN{AuthKey: "secret"}}, + want: true, + }, + { + name: "firewall with vpn but not authkey", + allocation: &apiv2.MachineAllocation{Vpn: &apiv2.MachineVPN{}}, + want: false, + }, + { + name: "firewall without vpn", + allocation: &apiv2.MachineAllocation{}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.HasVpn() + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_NTPServers(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []string + }{ + { + name: "with one ntp", + allocation: &apiv2.MachineAllocation{NtpServers: []*apiv2.NTPServer{{Address: "ntp.pool.org"}}}, + want: []string{"ntp.pool.org"}, + }, + { + name: "with two ntp", + allocation: &apiv2.MachineAllocation{NtpServers: []*apiv2.NTPServer{{Address: "ntp.pool.org"}, {Address: "ntp2.pool.org"}}}, + want: []string{"ntp.pool.org", "ntp2.pool.org"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.NTPServers() + require.Equal(t, tt.want, got) + }) + } +} + +func 
TestNetwork_LoopbackCIDRs(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []string + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []string{"10.1.0.1/32"}, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []string{"10.0.16.2/32", "10.0.18.2/32"}, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.LoopbackCIDRs() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_UnderlayNetwork(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want *apiv2.MachineNetwork + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no underlay network present in network allocation"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.UnderlayNetwork() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_PrivatePrimaryNetwork(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want *apiv2.MachineNetwork + wantErr error + }{ + { + name: "firewall", + allocation: 
&apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + 
Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + wantErr: nil, + }, + { + name: "storage machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + wantErr: nil, + }, + { + name: "storage machine in wrong project", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-c"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no 
private primary network present in network allocation"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.PrivatePrimaryNetwork() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_PrivateSecondarySharedNetworks(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []*apiv2.MachineNetwork + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: 
apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + { + name: "storage machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.PrivateSecondarySharedNetworks() + require.Equal(t, tt.want, got) + }) + } +} + +func 
TestNetwork_PrivatePrimaryIPs(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []string + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []string{"10.1.0.1"}, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []string{"10.0.16.2"}, + wantErr: nil, + }, + { + name: "storage machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []string{"10.0.18.2"}, + wantErr: nil, + }, + { + name: "storage machine in wrong project", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-c"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no private primary ip present in network allocation"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.PrivatePrimaryIPs() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_PrivatePrimaryNetworksPrefixes(t *testing.T) { + tests := []struct { + name string + allocation 
*apiv2.MachineAllocation + want []string + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []string{"10.0.16.0/22"}, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: 
[]string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []string{"10.0.16.0/22"}, + wantErr: nil, + }, + { + name: "storage machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []string{"10.0.16.0/22"}, + wantErr: nil, + }, + { + name: "storage machine in wrong project", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + Project: "project-b", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-c"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no private primary networks present in network allocation"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.PrivatePrimaryNetworksPrefixes() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_VxlanIDs(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []uint64 + }{ + { + name: "firewall", + allocation: 
&apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []uint64{3981, 3982, 104009, 104010}, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: []uint64{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.VxlanIDs() + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_EVPNIfaces(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []network.EvpnIface + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: 
apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []network.EvpnIface{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + CIDRs: []string{"10.0.16.2/32"}, + VlanID: 1000, + VrfID: 3981, + }, + { + Network: "partition-storage", + CIDRs: []string{"10.0.18.2/32"}, + VlanID: 1001, + VrfID: 3982, + }, + { + Network: "internet", + CIDRs: []string{"185.1.2.3/32", "185.1.2.4/32"}, + VlanID: 1002, + VrfID: 104009, + }, + { + Network: "mpls", + CIDRs: []string{"100.127.129.1/32"}, + VlanID: 1004, + VrfID: 104010, + }, + }, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no evpn interfaces supported on machines"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.EVPNIfaces() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_GetNetworks(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + networkType apiv2.NetworkType + want []*apiv2.MachineNetwork + }{ + { + name: "firewall external networks", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: 
apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + networkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + want: []*apiv2.MachineNetwork{ + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: 
[]string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + + { + name: "firewall underlay networks", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + networkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + want: []*apiv2.MachineNetwork{ + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := 
network.New(tt.allocation) + got := n.GetNetworks(tt.networkType) + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_GetExternalNetworkVrfNames(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want []string + }{ + { + name: "firewall external networks", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: []string{"vrf104009", "vrf104010"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got := n.GetExternalNetworkVrfNames() + require.Equal(t, 
tt.want, got) + }) + } +} + +func TestNetwork_GetDefaultRouteNetwork(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want *apiv2.MachineNetwork + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + 
NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: nil, + wantErr: errors.New("no network which provides a default route found"), + }, + + { + name: "firewall dualstack", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2a02:c00:20::1", "185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "2a02:c00:20::/45"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: 
[]string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: &apiv2.MachineNetwork{ + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2a02:c00:20::1", "185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "2a02:c00:20::/45"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.GetDefaultRouteNetwork() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_GetDefaultRouteNetworkVrfName(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want string + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: 
apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: "vrf104009", + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: "", + wantErr: errors.New("no network which provides a default route found"), + }, + + { + name: "firewall dualstack", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: 
[]string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2a02:c00:20::1", "185.1.2.3"}, + Prefixes: []string{"185.1.2.0/24", "2a02:c00:20::/45"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: "vrf104009", + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := n.GetDefaultRouteNetworkVrfName() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + require.Equal(t, tt.want, got) + }) + } +} + +func TestNetwork_GetTenantNetworkVrfName(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + want string + wantErr error + }{ + { + name: "firewall", + allocation: &apiv2.MachineAllocation{ + Hostname: "firewall", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "project-a", + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, 
+ Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3", "185.1.2.4"}, + Prefixes: []string{"185.1.2.0/24", "185.27.0.0/22"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Asn: 4200003073, + Ips: []string{"10.1.0.1"}, + Prefixes: []string{"10.0.12.0/22"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/24"}, + Ips: []string{"100.127.129.1"}, + DestinationPrefixes: []string{"100.127.1.0/24"}, + Vrf: 104010, + Asn: 4200003073, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + }, + }, + want: "vrf3981", + wantErr: nil, + }, + { + name: "machine", + allocation: &apiv2.MachineAllocation{ + Hostname: "machine", + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_MACHINE, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + Project: new("project-a"), + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + Asn: 4200003073, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Project: new("project-b"), + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Asn: 4200003073, + }, + }, + }, + want: "vrf3981", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := network.New(tt.allocation) + got, err := 
n.GetTenantNetworkVrfName() + if diff := cmp.Diff(tt.wantErr, err, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/network/nftables.go b/pkg/network/nftables.go deleted file mode 100644 index d364d1f..0000000 --- a/pkg/network/nftables.go +++ /dev/null @@ -1,309 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "net/netip" - "strconv" - "strings" - - "github.com/metal-stack/metal-go/api/models" - mn "github.com/metal-stack/metal-lib/pkg/net" - - "github.com/metal-stack/os-installer/pkg/exec" - "github.com/metal-stack/os-installer/pkg/net" -) - -const ( - // TplNftables defines the name of the template to render nftables configuration. - TplNftables = "nftrules.tpl" - dnsPort = "domain" - nftablesService = "nftables.service" - systemctlBin = "/bin/systemctl" - - // Set up additional conntrack zone for DNS traffic. - // There was a problem that duplicate packets were registered by conntrack - // when packet was leaking from private VRF to the internet VRF. - // Isolating traffic to special zone solves the problem. - // Zone number(3) was obtained by experiments. - dnsProxyZone = "3" -) - -type ( - // NftablesData represents the information required to render nftables configuration. - NftablesData struct { - Comment string - SNAT []SNAT - DNSProxyDNAT DNAT - VPN bool - ForwardPolicy string - FirewallRules FirewallRules - Input Input - } - - Input struct { - InInterfaces []string - } - - FirewallRules struct { - Egress []string - Ingress []string - } - - // SNAT holds the information required to configure Source NAT. - SNAT struct { - Comment string - OutInterface string - OutIntSpec AddrSpec - SourceSpecs []AddrSpec - } - - // DNAT holds the information required to configure DNAT. 
- DNAT struct { - Comment string - InInterfaces []string - SAddr string - DAddr string - Port string - Zone string - DestSpec AddrSpec - } - - AddrSpec struct { - AddressFamily string - Address string - } - - // NftablesValidator can validate configuration for nftables rules. - NftablesValidator struct { - path string - log *slog.Logger - } - - NftablesReloader struct{} -) - -// newNftablesConfigApplier constructs a new instance of this type. -func newNftablesConfigApplier(c config, validator net.Validator, enableDNSProxy bool, forwardPolicy ForwardPolicy) net.Applier { - data := NftablesData{ - Comment: versionHeader(c.MachineUUID), - SNAT: getSNAT(c, enableDNSProxy), - ForwardPolicy: string(forwardPolicy), - FirewallRules: getFirewallRules(c), - Input: getInput(c), - } - - if enableDNSProxy { - data.DNSProxyDNAT = getDNSProxyDNAT(c, dnsPort, dnsProxyZone) - } - - if c.VPN != nil { - data.VPN = true - } - - return net.NewNetworkApplier(data, validator, &NftablesReloader{}) -} - -func (*NftablesReloader) Reload() error { - return exec.NewVerboseCmd(systemctlBin, "reload", nftablesService).Run() -} - -func isDMZNetwork(n *models.V1MachineNetwork) bool { - return *n.Networktype == mn.PrivateSecondaryShared && containsDefaultRoute(n.Destinationprefixes) -} - -func getInput(c config) Input { - input := Input{} - networks := c.GetNetworks(mn.PrivatePrimaryUnshared, mn.PrivatePrimaryShared, mn.PrivateSecondaryShared) - for _, n := range networks { - input.InInterfaces = append(input.InInterfaces, fmt.Sprintf("vrf%d", *n.Vrf)) - } - return input -} - -func getSNAT(c config, enableDNSProxy bool) []SNAT { - var result []SNAT - - private := c.getPrivatePrimaryNetwork() - networks := c.GetNetworks(mn.PrivatePrimaryUnshared, mn.PrivatePrimaryShared, mn.PrivateSecondaryShared, mn.External) - - privatePfx := private.Prefixes - for _, n := range c.Networks { - if isDMZNetwork(n) { - privatePfx = append(privatePfx, n.Prefixes...) 
- } - - } - - var ( - defaultNetwork models.V1MachineNetwork - defaultAF string - ) - defaultNetworkName, err := c.getDefaultRouteVRFName() - if err == nil { - defaultNetwork = *c.GetDefaultRouteNetwork() - ip, _ := netip.ParseAddr(defaultNetwork.Ips[0]) - defaultAF = "ip" - if ip.Is6() { - defaultAF = "ip6" - } - } - for _, n := range networks { - if n.Nat != nil && !*n.Nat { - continue - } - - var sources []AddrSpec - cmt := fmt.Sprintf("snat (networkid: %s)", *n.Networkid) - svi := fmt.Sprintf("vlan%d", *n.Vrf) - - for _, p := range privatePfx { - af, err := getAddressFamily(p) - if err != nil { - continue - } - sspec := AddrSpec{ - Address: p, - AddressFamily: af, - } - sources = append(sources, sspec) - } - s := SNAT{ - Comment: cmt, - OutInterface: svi, - SourceSpecs: sources, - } - - if enableDNSProxy && (vrfNameOf(n) == defaultNetworkName) { - s.OutIntSpec = AddrSpec{ - AddressFamily: defaultAF, - Address: defaultNetwork.Ips[0], - } - } - result = append(result, s) - } - - return result -} - -func getDNSProxyDNAT(c config, port, zone string) DNAT { - networks := c.GetNetworks(mn.PrivatePrimaryUnshared, mn.PrivatePrimaryShared, mn.PrivateSecondaryShared) - svis := []string{} - for _, n := range networks { - svi := fmt.Sprintf("vlan%d", *n.Vrf) - svis = append(svis, svi) - } - - n := c.GetDefaultRouteNetwork() - if n == nil { - return DNAT{} - } - - ip, _ := netip.ParseAddr(n.Ips[0]) - af := "ip" - saddr := "10.0.0.0/8" - daddr := "@proxy_dns_servers" - if ip.Is6() { - af = "ip6" - saddr = "fd00::/8" - daddr = "@proxy_dns_servers_v6" - } - return DNAT{ - Comment: "dnat to dns proxy", - InInterfaces: svis, - SAddr: saddr, - DAddr: daddr, - Port: port, - Zone: zone, - DestSpec: AddrSpec{ - AddressFamily: af, - Address: n.Ips[0], - }, - } -} - -func getFirewallRules(c config) FirewallRules { - if c.FirewallRules == nil { - return FirewallRules{} - } - var ( - egressRules = []string{"# egress rules specified during firewall creation"} - ingressRules = []string{"# 
ingress rules specified during firewall creation"} - inputInterfaces = getInput(c) - quotedInputInterfaces []string - ) - for _, i := range inputInterfaces.InInterfaces { - quotedInputInterfaces = append(quotedInputInterfaces, "\""+i+"\"") - } - - for _, r := range c.FirewallRules.Egress { - ports := make([]string, len(r.Ports)) - for i, v := range r.Ports { - ports[i] = strconv.Itoa(int(v)) - } - for _, daddr := range r.To { - af, err := getAddressFamily(daddr) - if err != nil { - continue - } - egressRules = append(egressRules, - fmt.Sprintf("iifname { %s } %s daddr %s %s dport { %s } counter accept comment %q", strings.Join(quotedInputInterfaces, ","), af, daddr, strings.ToLower(r.Protocol), strings.Join(ports, ","), r.Comment)) - } - } - - privatePrimaryNetwork := c.getPrivatePrimaryNetwork() - outputInterfacenames := "" - if privatePrimaryNetwork != nil && privatePrimaryNetwork.Vrf != nil { - outputInterfacenames = fmt.Sprintf("oifname { \"vrf%d\", \"vni%d\", \"vlan%d\" }", *privatePrimaryNetwork.Vrf, *privatePrimaryNetwork.Vrf, *privatePrimaryNetwork.Vrf) - } - - for _, r := range c.FirewallRules.Ingress { - ports := make([]string, len(r.Ports)) - for i, v := range r.Ports { - ports[i] = strconv.Itoa(int(v)) - } - destinationSpec := "" - if len(r.To) > 0 { - af, err := getAddressFamily(r.To[0]) // To is validated to contain no mixed addressfamilies in metal-api - if err != nil { - continue - } - destinationSpec = fmt.Sprintf("%s daddr { %s }", af, strings.Join(r.To, ", ")) - } else if outputInterfacenames != "" { - destinationSpec = outputInterfacenames - } else { - c.log.Warn("no to address specified but not private primary network present, skipping this rule", "rule", r) - continue - } - - for _, saddr := range r.From { - af, err := getAddressFamily(saddr) - if err != nil { - continue - } - ingressRules = append(ingressRules, fmt.Sprintf("%s %s saddr %s %s dport { %s } counter accept comment %q", destinationSpec, af, saddr, strings.ToLower(r.Protocol), 
strings.Join(ports, ","), r.Comment)) - } - } - return FirewallRules{ - Egress: egressRules, - Ingress: ingressRules, - } -} - -func getAddressFamily(p string) (string, error) { - prefix, err := netip.ParsePrefix(p) - if err != nil { - return "", err - } - family := "ip" - if prefix.Addr().Is6() { - family = "ip6" - } - return family, nil -} - -// Validate validates network interfaces configuration. -func (v NftablesValidator) Validate() error { - v.log.Info("running 'nft --check --file' to validate changes.", "file", v.path) - return exec.NewVerboseCmd("nft", "--check", "--file", v.path).Run() -} diff --git a/pkg/network/nftables_exporter.go b/pkg/network/nftables_exporter.go deleted file mode 100644 index e178066..0000000 --- a/pkg/network/nftables_exporter.go +++ /dev/null @@ -1,29 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -// TplNftablesExporter is the name of the template for the nftables_exporter service. -const tplNftablesExporter = "nftables_exporter.service.tpl" - -// SystemdUnitNftablesExporter is the name of the systemd unit for the nftables_exporter. -const systemdUnitNftablesExporter = "nftables-exporter.service" - -// NftablesExporterData contains the data to render the nftables_exporter service template. -type NftablesExporterData struct { - Comment string - TenantVrf string -} - -// NewNftablesExporterServiceApplier constructs a new instance of this type. 
-func NewNftablesExporterServiceApplier(kb config, v net.Validator) (net.Applier, error) { - tenantVrf, err := getTenantVRFName(kb) - if err != nil { - return nil, err - } - - data := NftablesExporterData{Comment: versionHeader(kb.MachineUUID), TenantVrf: tenantVrf} - - return net.NewNetworkApplier(data, v, nil), nil -} diff --git a/pkg/network/nftables_test.go b/pkg/network/nftables_test.go deleted file mode 100644 index c51c270..0000000 --- a/pkg/network/nftables_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package network - -import ( - "bytes" - "log/slog" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCompileNftRules(t *testing.T) { - - tests := []struct { - input string - expected string - enableDNSProxy bool - forwardPolicy ForwardPolicy - }{ - { - input: "testdata/firewall.yaml", - expected: "testdata/nftrules", - enableDNSProxy: false, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall.yaml", - expected: "testdata/nftrules_accept_forwarding", - enableDNSProxy: false, - forwardPolicy: ForwardPolicyAccept, - }, - { - input: "testdata/firewall_dmz.yaml", - expected: "testdata/nftrules_dmz", - enableDNSProxy: true, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall_dmz_app.yaml", - expected: "testdata/nftrules_dmz_app", - enableDNSProxy: true, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall_ipv6.yaml", - expected: "testdata/nftrules_ipv6", - enableDNSProxy: true, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall_shared.yaml", - expected: "testdata/nftrules_shared", - enableDNSProxy: true, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall_vpn.yaml", - expected: "testdata/nftrules_vpn", - enableDNSProxy: false, - forwardPolicy: ForwardPolicyDrop, - }, - { - input: "testdata/firewall_with_rules.yaml", - expected: "testdata/nftrules_with_rules", - enableDNSProxy: false, - forwardPolicy: 
ForwardPolicyDrop, - }, - } - log := slog.Default() - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - expected, err := os.ReadFile(tt.expected) - require.NoError(t, err) - - kb, err := New(log, tt.input) - require.NoError(t, err) - - a := newNftablesConfigApplier(*kb, nil, tt.enableDNSProxy, tt.forwardPolicy) - b := bytes.Buffer{} - - tpl := MustParseTpl(TplNftables) - err = a.Render(&b, *tpl) - require.NoError(t, err) - assert.Equal(t, string(expected), b.String()) - }) - } -} diff --git a/pkg/network/node_exporter.go b/pkg/network/node_exporter.go deleted file mode 100644 index 3fd22b3..0000000 --- a/pkg/network/node_exporter.go +++ /dev/null @@ -1,29 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplNodeExporter is the name of the template for the node_exporter service. -const tplNodeExporter = "node_exporter.service.tpl" - -// systemdUnitNodeExporter is the name of the systemd unit for the node_exporter. -const systemdUnitNodeExporter = "node-exporter.service" - -// NodeExporterData contains the data to render the node_exporter service template. -type NodeExporterData struct { - Comment string - TenantVrf string -} - -// newNodeExporterServiceApplier constructs a new instance of this type. 
-func newNodeExporterServiceApplier(kb config, v net.Validator) (net.Applier, error) { - tenantVrf, err := getTenantVRFName(kb) - if err != nil { - return nil, err - } - - data := NodeExporterData{Comment: versionHeader(kb.MachineUUID), TenantVrf: tenantVrf} - - return net.NewNetworkApplier(data, v, nil), nil -} diff --git a/pkg/network/routemap_test.go b/pkg/network/routemap_test.go deleted file mode 100644 index 6f51a31..0000000 --- a/pkg/network/routemap_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package network - -import ( - "fmt" - "log/slog" - "net/netip" - "reflect" - "testing" - - "github.com/stretchr/testify/require" -) - -type network struct { - vrf string - prefixes []importPrefix - destinations []importPrefix -} - -var ( - defaultRoute = importPrefix{Prefix: netip.MustParsePrefix("0.0.0.0/0"), Policy: Permit, SourceVRF: inetVrf} - defaultRoute6 = importPrefix{Prefix: netip.MustParsePrefix("::/0"), Policy: Permit, SourceVRF: inetVrf} - defaultRouteFromDMZ = importPrefix{Prefix: netip.MustParsePrefix("0.0.0.0/0"), Policy: Permit, SourceVRF: dmzVrf} - externalVrf = "vrf104010" - externalNet = importPrefix{Prefix: netip.MustParsePrefix("100.127.129.0/24"), Policy: Permit, SourceVRF: externalVrf} - externalDestinationNet = importPrefix{Prefix: netip.MustParsePrefix("100.127.1.0/24"), Policy: Permit, SourceVRF: externalVrf} - privateVrf = "vrf3981" - privateNet = importPrefix{Prefix: netip.MustParsePrefix("10.0.16.0/22"), Policy: Permit, SourceVRF: privateVrf} - privateNet6 = importPrefix{Prefix: netip.MustParsePrefix("2002::/64"), Policy: Permit, SourceVRF: privateVrf} - sharedVrf = "vrf3982" - sharedNet = importPrefix{Prefix: netip.MustParsePrefix("10.0.18.0/22"), Policy: Permit, SourceVRF: sharedVrf} - dmzVrf = "vrf3983" - dmzNet = importPrefix{Prefix: netip.MustParsePrefix("10.0.20.0/22"), Policy: Permit, SourceVRF: dmzVrf} - inetVrf = "vrf104009" - inetNet1 = importPrefix{Prefix: netip.MustParsePrefix("185.1.2.0/24"), Policy: Permit, SourceVRF: inetVrf} - 
inetNet2 = importPrefix{Prefix: netip.MustParsePrefix("185.27.0.0/22"), Policy: Permit, SourceVRF: inetVrf} - inetNet6 = importPrefix{Prefix: netip.MustParsePrefix("2a02:c00:20::/45"), Policy: Permit, SourceVRF: inetVrf} - publicDefaultNet = importPrefix{Prefix: netip.MustParsePrefix("185.1.2.3/32"), Policy: Deny, SourceVRF: inetVrf} - publicDefaultNet2 = importPrefix{Prefix: netip.MustParsePrefix("10.0.20.2/32"), Policy: Deny, SourceVRF: dmzVrf} - publicDefaultNetIPv6 = importPrefix{Prefix: netip.MustParsePrefix("2a02:c00:20::1/128"), Policy: Deny, SourceVRF: inetVrf} - - private = network{ - vrf: privateVrf, - prefixes: []importPrefix{privateNet}, - } - - private6 = network{ - vrf: privateVrf, - prefixes: []importPrefix{privateNet6}, - } - - inet = network{ - vrf: inetVrf, - prefixes: []importPrefix{inetNet1, inetNet2}, - destinations: []importPrefix{defaultRoute}, - } - - inet6 = network{ - vrf: inetVrf, - prefixes: []importPrefix{inetNet6}, - destinations: []importPrefix{defaultRoute6}, - } - dualstack = network{ - vrf: inetVrf, - prefixes: []importPrefix{inetNet1, inetNet6}, - destinations: []importPrefix{defaultRoute6}, - } - external = network{ - vrf: externalVrf, - destinations: []importPrefix{externalDestinationNet}, - prefixes: []importPrefix{externalNet}, - } - - shared = network{ - vrf: sharedVrf, - prefixes: []importPrefix{sharedNet}, - } - - dmz = network{ - vrf: dmzVrf, - prefixes: []importPrefix{dmzNet}, - destinations: []importPrefix{defaultRouteFromDMZ}, - } -) - -func leakFrom(pfxs []importPrefix, sourceVrf string) []importPrefix { - r := []importPrefix{} - for _, e := range pfxs { - i := e - i.SourceVRF = sourceVrf - r = append(r, i) - } - return r -} - -func Test_importRulesForNetwork(t *testing.T) { - tests := []struct { - name string - input string - want map[string]map[string]ImportSettings - }{ - { - name: "standard firewall with private primary unshared network, private secondary shared network, internet and mpls", - input: 
"testdata/firewall.yaml", - want: map[string]map[string]ImportSettings{ - // The target VRF - private.vrf: { - // Imported VRFs with their restrictions - inet.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(inet.destinations, []importPrefix{publicDefaultNet}, inet.prefixes), - }, - external.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(external.destinations, external.prefixes), - }, - shared.vrf: ImportSettings{ - ImportPrefixes: shared.prefixes, - }, - }, - shared.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private.prefixes, leakFrom(shared.prefixes, private.vrf)), - }, - }, - inet.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: leakFrom(inet.prefixes, private.vrf), - ImportPrefixesNoExport: private.prefixes, - }, - }, - external.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: leakFrom(external.prefixes, private.vrf), - ImportPrefixesNoExport: private.prefixes, - }, - }, - }, - }, - { - name: "firewall of a shared private network (shared/storage firewall)", - input: "testdata/firewall_shared.yaml", - want: map[string]map[string]ImportSettings{ - shared.vrf: { - inet.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(inet.destinations, []importPrefix{publicDefaultNet}, inet.prefixes), - }, - }, - inet.vrf: { - shared.vrf: ImportSettings{ - ImportPrefixes: leakFrom(inet.prefixes, shared.vrf), - ImportPrefixesNoExport: shared.prefixes, - }, - }, - }, - }, - { - name: "firewall of a private network with dmz network and internet (dmz firewall)", - input: "testdata/firewall_dmz.yaml", - want: map[string]map[string]ImportSettings{ - private.vrf: { - inet.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(inet.destinations, []importPrefix{publicDefaultNet}, inet.prefixes), - }, - dmz.vrf: ImportSettings{ - ImportPrefixes: dmz.prefixes, - }, - }, - dmz.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private.prefixes, leakFrom(dmz.prefixes, private.vrf)), - }, - inet.vrf: 
ImportSettings{ - ImportPrefixes: concatPfxSlices(inet.destinations, inet.prefixes), - }, - }, - inet.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: leakFrom(inet.prefixes, private.vrf), - ImportPrefixesNoExport: private.prefixes, - }, - dmz.vrf: ImportSettings{ - ImportPrefixesNoExport: dmz.prefixes, - }, - }, - }, - }, - { - name: "firewall of a private network with dmz network (dmz app firewall)", - input: "testdata/firewall_dmz_app.yaml", - want: map[string]map[string]ImportSettings{ - private.vrf: { - dmz.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices([]importPrefix{publicDefaultNet2}, dmz.prefixes, dmz.destinations), - }, - }, - dmz.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private.prefixes, leakFrom(dmz.prefixes, private.vrf)), - }, - }, - }, - }, - { - name: "firewall of a private network with dmz network and storage (dmz app firewall)", - input: "testdata/firewall_dmz_app_storage.yaml", - want: map[string]map[string]ImportSettings{ - private.vrf: { - shared.vrf: ImportSettings{ - ImportPrefixes: shared.prefixes, - }, - dmz.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices([]importPrefix{publicDefaultNet2}, dmz.prefixes, dmz.destinations), - }, - }, - dmz.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private.prefixes, leakFrom(dmz.prefixes, private.vrf)), - }, - }, - shared.vrf: { - private.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private.prefixes, leakFrom(shared.prefixes, private.vrf)), - }, - }, - }, - }, - { - name: "firewall with ipv6 private network and ipv6 internet network", - input: "testdata/firewall_ipv6.yaml", - want: map[string]map[string]ImportSettings{ - private6.vrf: { - inet6.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(inet6.destinations, []importPrefix{publicDefaultNetIPv6}, inet6.prefixes), - }, - external.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(external.destinations, external.prefixes), - }, - shared.vrf: ImportSettings{ 
- ImportPrefixes: shared.prefixes, - }, - }, - shared.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private6.prefixes, leakFrom(shared.prefixes, private6.vrf)), - }, - }, - inet6.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: leakFrom(inet6.prefixes, private6.vrf), - ImportPrefixesNoExport: private6.prefixes, - }, - }, - external.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: leakFrom(external.prefixes, private6.vrf), - ImportPrefixesNoExport: private6.prefixes, - }, - }, - }, - }, - { - name: "firewall with ipv6 private network and dualstack internet network", - input: "testdata/firewall_dualstack.yaml", - want: map[string]map[string]ImportSettings{ - private6.vrf: { - inet6.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(inet6.destinations, []importPrefix{publicDefaultNetIPv6, publicDefaultNet}, dualstack.prefixes), - }, - external.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(external.destinations, external.prefixes), - }, - shared.vrf: ImportSettings{ - ImportPrefixes: shared.prefixes, - }, - }, - shared.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: concatPfxSlices(private6.prefixes, leakFrom(shared.prefixes, private6.vrf)), - }, - }, - inet6.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: leakFrom(dualstack.prefixes, private6.vrf), - ImportPrefixesNoExport: private6.prefixes, - }, - }, - external.vrf: { - private6.vrf: ImportSettings{ - ImportPrefixes: leakFrom(external.prefixes, private6.vrf), - ImportPrefixesNoExport: private6.prefixes, - }, - }, - }, - }, - } - log := slog.Default() - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - kb, err := New(log, tt.input) - require.NoError(t, err) - err = kb.Validate(Firewall) - if err != nil { - t.Errorf("%s is not valid: %v", tt.input, err) - return - } - for _, network := range kb.Networks { - got := importRulesForNetwork(*kb, network) - if got == nil { - continue - } - gotBySourceVrf := got.bySourceVrf() - 
targetVrf := fmt.Sprintf("vrf%d", *network.Vrf) - want := tt.want[targetVrf] - - if !reflect.DeepEqual(gotBySourceVrf, want) { - t.Errorf("importRulesForNetwork() \ntargetVrf: %s \ng: %v, \nw: %v", targetVrf, gotBySourceVrf, want) - } - } - }) - } -} diff --git a/pkg/network/service_test.go b/pkg/network/service_test.go deleted file mode 100644 index 301a519..0000000 --- a/pkg/network/service_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package network - -import ( - "bytes" - "log/slog" - "os" - "testing" - - "github.com/metal-stack/os-installer/pkg/net" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestServices(t *testing.T) { - log := slog.Default() - - kb, err := New(log, "testdata/firewall.yaml") - require.NoError(t, err) - v := serviceValidator{} - dsApplier, err := newDroptailerServiceApplier(*kb, v) - require.NoError(t, err) - fcApplier, err := newFirewallControllerServiceApplier(*kb, v) - require.NoError(t, err) - nodeExporterApplier, err := newNodeExporterServiceApplier(*kb, v) - require.NoError(t, err) - suApplier, err := newSuricataUpdateServiceApplier(*kb, v) - require.NoError(t, err) - nftablesExporterApplier, err := NewNftablesExporterServiceApplier(*kb, v) - require.NoError(t, err) - - tests := []struct { - applier net.Applier - expected string - template string - }{ - { - applier: dsApplier, - expected: "testdata/droptailer.service", - template: tplDroptailer, - }, - { - applier: fcApplier, - expected: "testdata/firewall-controller.service", - template: tplFirewallController, - }, - { - applier: nodeExporterApplier, - expected: "testdata/node-exporter.service", - template: tplNodeExporter, - }, - { - applier: nftablesExporterApplier, - expected: "testdata/nftables-exporter.service", - template: tplNftablesExporter, - }, - { - applier: suApplier, - expected: "testdata/suricata-update.service", - template: tplSuricataUpdate, - }, - } - - for _, test := range tests { - expected, err := os.ReadFile(test.expected) - 
require.NoError(t, err) - - b := bytes.Buffer{} - tpl := MustParseTpl(test.template) - err = test.applier.Render(&b, *tpl) - require.NoError(t, err) - assert.Equal(t, string(expected), b.String()) - } -} diff --git a/pkg/network/suricata_config.go b/pkg/network/suricata_config.go deleted file mode 100644 index 55d8c5e..0000000 --- a/pkg/network/suricata_config.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "strings" - - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplSuricataConfig is the name of the template for the suricata configuration. -const tplSuricataConfig = "suricata_config.yaml.tpl" - -// SuricataConfigData represents the information required to render suricata configuration. -type SuricataConfigData struct { - Comment string - DefaultRouteVrf string - Interface string -} - -// suricataConfigValidator can validate configuration for suricata. -type suricataConfigValidator struct { - path string -} - -// newSuricataConfigApplier constructs a new instance of this type. -func newSuricataConfigApplier(kb config, tmpFile string) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - i := strings.Replace(defaultRouteVrf, "vrf", "vlan", 1) - data := SuricataConfigData{Comment: versionHeader(kb.MachineUUID), DefaultRouteVrf: defaultRouteVrf, Interface: i} - validator := suricataConfigValidator{tmpFile} - - return net.NewNetworkApplier(data, validator, nil), nil -} - -// Validate validates suricata configuration. -func (v suricataConfigValidator) Validate() error { - return nil -} diff --git a/pkg/network/suricata_defaults.go b/pkg/network/suricata_defaults.go deleted file mode 100644 index bf1df2d..0000000 --- a/pkg/network/suricata_defaults.go +++ /dev/null @@ -1,40 +0,0 @@ -package network - -import ( - "strings" - - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplSuricataDefaults is the name of the template for the suricata defaults. 
-const tplSuricataDefaults = "suricata_defaults.tpl" - -// SuricataDefaultsData represents the information required to render suricata defaults. -type SuricataDefaultsData struct { - Comment string - Interface string -} - -// suricataDefaultsValidator can validate defaults for suricata. -type suricataDefaultsValidator struct { - path string -} - -// newSuricataDefaultsApplier constructs a new instance of this type. -func newSuricataDefaultsApplier(kb config, tmpFile string) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - i := strings.Replace(defaultRouteVrf, "vrf", "vlan", 1) - data := SuricataDefaultsData{Comment: versionHeader(kb.MachineUUID), Interface: i} - validator := suricataDefaultsValidator{path: tmpFile} - - return net.NewNetworkApplier(data, validator, nil), nil -} - -// Validate validates suricata defaults. -func (v suricataDefaultsValidator) Validate() error { - return nil -} diff --git a/pkg/network/suricata_update.go b/pkg/network/suricata_update.go deleted file mode 100644 index 616817a..0000000 --- a/pkg/network/suricata_update.go +++ /dev/null @@ -1,29 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -// tplSuricataUpdate is the name of the template for the suricata-update service. -const tplSuricataUpdate = "suricata_update.service.tpl" - -// systemdUnitSuricataUpdate is the name of the systemd unit for the suricata-update. -const systemdUnitSuricataUpdate = "suricata-update.service" - -// SuricataUpdateData contains the data to render the suricata-update service template. -type SuricataUpdateData struct { - Comment string - DefaultRouteVrf string -} - -// newSuricataUpdateServiceApplier constructs a new instance of this type. 
-func newSuricataUpdateServiceApplier(kb config, v net.Validator) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - data := SuricataUpdateData{Comment: versionHeader(kb.MachineUUID), DefaultRouteVrf: defaultRouteVrf} - - return net.NewNetworkApplier(data, v, nil), nil -} diff --git a/pkg/network/systemd.go b/pkg/network/systemd.go deleted file mode 100644 index 9f57fef..0000000 --- a/pkg/network/systemd.go +++ /dev/null @@ -1,82 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/metal-stack/metal-go/api/models" - "github.com/metal-stack/os-installer/pkg/net" -) - -const ( - // tplSystemdLinkLan defines the name of the template to render system.link file. - tplSystemdLinkLan = "networkd/10-lan.link.tpl" - - tplSystemdNetworkLo = "networkd/00-lo.network.tpl" - // tplSystemdNetworkLan defines the name of the template to render system.network file. - tplSystemdNetworkLan = "networkd/10-lan.network.tpl" - // mtuFirewall defines the value for MTU specific to the needs of a firewall. VXLAN requires higher MTU. - mtuFirewall = 9216 - // mtuMachine defines the value for MTU specific to the needs of a machine. - mtuMachine = 9000 -) - -type ( - // SystemdCommonData contains attributes common to systemd.network and systemd.link files. - SystemdCommonData struct { - Comment string - Index int - } - - // SystemdLinkData contains attributes required to render systemd.link files. - SystemdLinkData struct { - SystemdCommonData - MAC string - MTU int - EVPNIfaces []EVPNIface - } - - // systemdValidator validates systemd.network and system.link files. - systemdValidator struct { - path string - } -) - -// newSystemdNetworkdApplier creates a new Applier to configure systemd.network. 
-func newSystemdNetworkdApplier(tmpFile string, data any) net.Applier { - validator := systemdValidator{tmpFile} - - return net.NewNetworkApplier(data, validator, nil) -} - -// newSystemdLinkApplier creates a new Applier to configure systemd.link. -func newSystemdLinkApplier(kind BareMetalType, machineUUID string, nicIndex int, nic *models.V1MachineNic, - tmpFile string, evpnIfaces []EVPNIface) (net.Applier, error) { - var mtu int - - switch kind { - case Firewall: - mtu = mtuFirewall - case Machine: - mtu = mtuMachine - default: - return nil, fmt.Errorf("unknown configuratorType of configurator: %d", kind) - } - - data := SystemdLinkData{ - SystemdCommonData: SystemdCommonData{ - Comment: versionHeader(machineUUID), - Index: nicIndex, - }, - MTU: mtu, - MAC: *nic.Mac, - EVPNIfaces: evpnIfaces, - } - validator := systemdValidator{tmpFile} - - return net.NewNetworkApplier(data, validator, nil), nil -} - -// Validate validates systemd.network and systemd.link files. -func (v systemdValidator) Validate() error { - return nil -} diff --git a/pkg/network/tailscale.go b/pkg/network/tailscale.go deleted file mode 100644 index c3e5eee..0000000 --- a/pkg/network/tailscale.go +++ /dev/null @@ -1,37 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -const ( - // tplTailscale is the name of the template for the Tailscale client. - tplTailscale = "tailscale.service.tpl" - // systemdUnitTailscale is the name of the systemd unit for the Tailscale client. - systemdUnitTailscale = "tailscale.service" -) - -// TailscaleData contains the data to render the Tailscale service template. -type TailscaleData struct { - MachineID string - AuthKey string - Address string - DefaultRouteVrf string -} - -// newTailscaleServiceApplier constructs a new instance of this type. 
-func newTailscaleServiceApplier(kb config, v net.Validator) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - data := TailscaleData{ - MachineID: kb.MachineUUID, - AuthKey: *kb.VPN.AuthKey, - Address: *kb.VPN.Address, - DefaultRouteVrf: defaultRouteVrf, - } - - return net.NewNetworkApplier(data, v, nil), nil -} diff --git a/pkg/network/tailscaled.go b/pkg/network/tailscaled.go deleted file mode 100644 index f5e67ae..0000000 --- a/pkg/network/tailscaled.go +++ /dev/null @@ -1,31 +0,0 @@ -package network - -import ( - "github.com/metal-stack/os-installer/pkg/net" -) - -const ( - // tplTailscaled is the name of the template for the tailscaled service. - tplTailscaled = "tailscaled.service.tpl" - // systemdUnitTailscaled is the name of the systemd unit for the tailscaled. - systemdUnitTailscaled = "tailscaled.service" - defaultTailscaledPort = "41641" -) - -// TailscaledData contains the data to render the tailscaled service template. -type TailscaledData struct { - TailscaledPort string - DefaultRouteVrf string -} - -// newTailscaledServiceApplier constructs a new instance of this type. 
-func newTailscaledServiceApplier(kb config, v net.Validator) (net.Applier, error) { - defaultRouteVrf, err := kb.getDefaultRouteVRFName() - if err != nil { - return nil, err - } - - data := TailscaledData{TailscaledPort: defaultTailscaledPort, DefaultRouteVrf: defaultRouteVrf} - - return net.NewNetworkApplier(data, v, nil), nil -} diff --git a/pkg/network/template.go b/pkg/network/template.go deleted file mode 100644 index 6545a7e..0000000 --- a/pkg/network/template.go +++ /dev/null @@ -1,23 +0,0 @@ -package network - -import ( - "embed" - "path" - "text/template" -) - -//go:embed tpl -var templates embed.FS - -func mustReadTpl(tplName string) string { - contents, err := templates.ReadFile(path.Join("tpl", tplName)) - if err != nil { - panic(err) - } - return string(contents) -} - -func MustParseTpl(tplName string) *template.Template { - s := mustReadTpl(tplName) - return template.Must(template.New(tplName).Parse(string(s))) -} diff --git a/pkg/network/testdata/firewall.yaml b/pkg/network/testdata/firewall.yaml deleted file mode 100644 index d8f19c3..0000000 --- a/pkg/network/testdata/firewall.yaml +++ /dev/null @@ -1,182 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. 
- destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - privateprimary: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # Applied to local loopback as /32. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_dmz.yaml b/pkg/network/testdata/firewall_dmz.yaml deleted file mode 100644 index cb7e76c..0000000 --- a/pkg/network/testdata/firewall_dmz.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - - asn: 4200003073 - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 10.0.20.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: dmz-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.20.0/22 - private: true - underlay: false - privateprimary: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3983 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: [] - # Applied to local loopback as /32. 
- ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_dmz_app.yaml b/pkg/network/testdata/firewall_dmz_app.yaml deleted file mode 100644 index 414ece6..0000000 --- a/pkg/network/testdata/firewall_dmz_app.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - - asn: 4200003073 - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 10.0.20.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: dmz-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.20.0/22 - private: true - underlay: false - privateprimary: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3983 - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. 
- vrf: 0 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_dmz_app_storage.yaml b/pkg/network/testdata/firewall_dmz_app_storage.yaml deleted file mode 100644 index 71af69b..0000000 --- a/pkg/network/testdata/firewall_dmz_app_storage.yaml +++ /dev/null @@ -1,160 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! -# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# 
Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. - ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - - asn: 4200003073 - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 10.0.20.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: dmz-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.20.0/22 - private: true - underlay: false - privateprimary: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3983 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. 
- nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_dualstack.yaml b/pkg/network/testdata/firewall_dualstack.yaml deleted file mode 100644 index 32c48bb..0000000 --- a/pkg/network/testdata/firewall_dualstack.yaml +++ /dev/null @@ -1,183 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 2002::1 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 2002::/64 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - ::/0 - # Applied to the SVI (as /32) - ips: - - 2a02:c00:20::1 - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 2a02:c00:20::/45 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. 
- destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - privateprimary: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # Applied to local loopback as /32. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_ipv6.yaml b/pkg/network/testdata/firewall_ipv6.yaml deleted file mode 100644 index 6f9aec1..0000000 --- a/pkg/network/testdata/firewall_ipv6.yaml +++ /dev/null @@ -1,181 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 2002::1 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 2002::/64 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - ::/0 - # Applied to the SVI (as /32) - ips: - - 2a02:c00:20::1 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 2a02:c00:20::/45 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: [] - # Applied to local loopback as /32. 
- ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - privateprimary: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # Applied to local loopback as /32. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_shared.yaml b/pkg/network/testdata/firewall_shared.yaml deleted file mode 100644 index fec137f..0000000 --- a/pkg/network/testdata/firewall_shared.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - privateprimary: true - networktype: privateprimaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. 
- destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] - - - - diff --git a/pkg/network/testdata/firewall_vpn.yaml b/pkg/network/testdata/firewall_vpn.yaml deleted file mode 100644 index f2aed3d..0000000 --- a/pkg/network/testdata/firewall_vpn.yaml +++ /dev/null @@ -1,184 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. 
- destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - privateprimary: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # Applied to local loopback as /32. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] -vpn: - address: https://test.test.dev - auth_key: abracadabra - - - diff --git a/pkg/network/testdata/firewall_with_rules.yaml b/pkg/network/testdata/firewall_with_rules.yaml deleted file mode 100644 index 954b125..0000000 --- a/pkg/network/testdata/firewall_with_rules.yaml +++ /dev/null @@ -1,213 +0,0 @@ -# Note: This is a general-purpose configuration file that contains information not only for this app. -# -# This file is considered to be used to configure the tenant firewall! 
-# -########################################### -# root@firewall:/etc/metal# date -# Thu May 16 13:48:11 CEST 2019 -# root@firewall:/etc/metal# cat install.yaml -# hostname: firewall -# ipaddress: 10.0.12.1 -# asn: "4200003073" -# networks: -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.0.12.1 -# nat: false -# networkid: bc830818-2df1-4904-8c40-4322296d393d -# prefixes: -# - 10.0.12.0/22 -# private: true -# underlay: false -# vrf: 3981 -# - asn: 4200003073 -# destinationprefixes: -# - 0.0.0.0/0 -# ips: -# - 185.24.0.1 -# nat: false -# networkid: internet-vagrant-lab -# prefixes: -# - 185.24.0.0/22 -# - 185.27.0.0/22 -# private: false -# underlay: false -# vrf: 104009 -# - asn: 4200003073 -# destinationprefixes: [] -# ips: -# - 10.1.0.1 -# nat: false -# networkid: underlay-vagrant-lab -# prefixes: -# - 10.0.12.0/22 -# private: false -# underlay: true -# vrf: 0 -# machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# sshpublickey: "" -# password: KAWT5DugqSPAezMl -# devmode: false -# console: ttyS0,115200n8 -########################################### ---- -# Applies to hostname of the firewall. -hostname: firewall -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Firewall: Used to consider the set of prefixes that originate the given IP's to establish route leak in public - # network VRF's for return traffic. Applied to the SVI (as /32) - # For Machine: Used to set the loopback ips. 
- ips: - - 10.0.16.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - underlay: false - networktype: privateprimaryunshared - # [IGNORED in case of private network] - # Defines the tenant VRF id. - vrf: 3981 - # === Private shared networks to route to - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # Applied to the SVI (as /32) - ips: - - 10.0.18.2 - # In case nat equals true, Source NAT via SVI is added. - nat: false - networkid: storage-net - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.18.0/22 - private: true - underlay: false - networktype: privatesecondaryshared - # VRF id considered to define EVPN interfaces. - vrf: 3982 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - 0.0.0.0/0 - # Applied to the SVI (as /32) - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. - nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - # === Underlay Network (underlay=true) - # Considered to define the BGP ASN. - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. 
- destinationprefixes: [] - # Applied to local loopback as /32. - ips: - - 10.1.0.1 - nat: false - networkid: underlay-vagrant-lab - # [IGNORED in case of UNDERLAY] - prefixes: - - 10.0.12.0/22 - private: false - privateprimary: false - underlay: true - networktype: underlay - # [IGNORED] Underlay runs in default VRF. - vrf: 0 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # Applied to local loopback as /32. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] -firewall_rules: - egress: - - comment: "allow apt update" - protocol: tcp - ports: [443] - to: - - "0.0.0.0/0" - - "1.2.3.4/32" - - comment: "allow apt update v6" - protocol: tcp - ports: [443] - to: - - "::/0" - ingress: - - protocol: TCP - ports: [22] - from: - - "2.3.4.0/24" - - "192.168.1.0/16" - to: - - "100.1.2.3/32" - - "100.1.2.4/32" - comment: "allow incoming ssh" - - protocol: TCP - ports: [22] - from: - - 2001:db8::1/128 - to: - - 2001:db8:0:113::/64 - comment: "allow incoming ssh ipv6" - - protocol: TCP - ports: [80,443,8080] - from: - - "1.2.3.0/24" - - "192.168.0.0/16" diff --git a/pkg/network/testdata/frr.conf.firewall_dmz b/pkg/network/testdata/frr.conf.firewall_dmz deleted file mode 
100644 index 35fdfc8..0000000 --- a/pkg/network/testdata/frr.conf.firewall_dmz +++ /dev/null @@ -1,180 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -frr version 8.5 -frr defaults datacenter -hostname firewall -! -log syslog debugging -debug bgp updates -debug bgp nht -debug bgp update-groups -debug bgp zebra -! -vrf vrf3981 - vni 3981 - exit-vrf -! -vrf vrf3983 - vni 3983 - exit-vrf -! -vrf vrf104009 - vni 104009 - exit-vrf -! -interface lan0 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -interface lan1 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -router bgp 4200003073 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - neighbor FABRIC peer-group - neighbor FABRIC remote-as external - neighbor FABRIC timers 2 8 - neighbor lan0 interface peer-group FABRIC - neighbor lan1 interface peer-group FABRIC - ! - address-family ipv4 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - exit-address-family - ! - address-family ipv6 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - neighbor FABRIC activate - exit-address-family - ! - address-family l2vpn evpn - neighbor FABRIC activate - advertise-all-vni - exit-address-family -! -router bgp 4200003073 vrf vrf3981 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf104009 - import vrf vrf3983 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf104009 - import vrf vrf3983 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -router bgp 4200003073 vrf vrf3983 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! 
- address-family ipv4 unicast - redistribute connected - import vrf vrf3981 - import vrf vrf104009 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3981 - import vrf vrf104009 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -router bgp 4200003073 vrf vrf104009 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3981 - import vrf vrf3983 - import vrf route-map vrf104009-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3981 - import vrf vrf3983 - import vrf route-map vrf104009-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -ip prefix-list vrf3981-import-from-vrf104009 permit 0.0.0.0/0 -ip prefix-list vrf3981-import-from-vrf104009 seq 101 deny 185.1.2.3/32 le 32 -ip prefix-list vrf3981-import-from-vrf104009 seq 102 permit 185.1.2.0/24 le 32 -ip prefix-list vrf3981-import-from-vrf104009 seq 103 permit 185.27.0.0/22 le 32 -ip prefix-list vrf3981-import-from-vrf3983 seq 104 permit 10.0.20.0/22 le 32 -route-map vrf3981-import-map permit 10 - match source-vrf vrf3983 - match ip address prefix-list vrf3981-import-from-vrf3983 -route-map vrf3981-import-map permit 20 - match source-vrf vrf104009 - match ip address prefix-list vrf3981-import-from-vrf104009 -route-map vrf3981-import-map deny 30 -! 
-ip prefix-list vrf3983-import-from-vrf3981 seq 100 permit 10.0.16.0/22 le 32 -ip prefix-list vrf3983-import-from-vrf3981 seq 101 permit 10.0.20.0/22 le 32 -ip prefix-list vrf3983-import-from-vrf104009 permit 0.0.0.0/0 -ip prefix-list vrf3983-import-from-vrf104009 seq 103 permit 185.1.2.0/24 le 32 -ip prefix-list vrf3983-import-from-vrf104009 seq 104 permit 185.27.0.0/22 le 32 -route-map vrf3983-import-map permit 10 - match source-vrf vrf3981 - match ip address prefix-list vrf3983-import-from-vrf3981 -route-map vrf3983-import-map permit 20 - match source-vrf vrf104009 - match ip address prefix-list vrf3983-import-from-vrf104009 -route-map vrf3983-import-map deny 30 -! -ip prefix-list vrf104009-import-from-vrf3981-no-export seq 100 permit 10.0.16.0/22 le 32 -ip prefix-list vrf104009-import-from-vrf3983-no-export seq 101 permit 10.0.20.0/22 le 32 -ip prefix-list vrf104009-import-from-vrf3981 seq 102 permit 185.1.2.0/24 le 32 -ip prefix-list vrf104009-import-from-vrf3981 seq 103 permit 185.27.0.0/22 le 32 -route-map vrf104009-import-map permit 10 - match source-vrf vrf3983 - match ip address prefix-list vrf104009-import-from-vrf3983-no-export - set community additive no-export -route-map vrf104009-import-map permit 20 - match source-vrf vrf3981 - match ip address prefix-list vrf104009-import-from-vrf3981-no-export - set community additive no-export -route-map vrf104009-import-map permit 30 - match source-vrf vrf3981 - match ip address prefix-list vrf104009-import-from-vrf3981 -route-map vrf104009-import-map deny 40 -! -route-map only-self-out permit 10 - match as-path SELF -route-map only-self-out deny 20 -! -route-map LOOPBACKS permit 10 - match interface lo -! -bgp as-path access-list SELF permit ^$ -! -line vty -! 
\ No newline at end of file diff --git a/pkg/network/testdata/frr.conf.firewall_dmz_app b/pkg/network/testdata/frr.conf.firewall_dmz_app deleted file mode 100644 index 0c6c82c..0000000 --- a/pkg/network/testdata/frr.conf.firewall_dmz_app +++ /dev/null @@ -1,121 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -frr version 8.5 -frr defaults datacenter -hostname firewall -! -log syslog debugging -debug bgp updates -debug bgp nht -debug bgp update-groups -debug bgp zebra -! -vrf vrf3981 - vni 3981 - exit-vrf -! -vrf vrf3983 - vni 3983 - exit-vrf -! -interface lan0 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -interface lan1 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -router bgp 4200003073 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - neighbor FABRIC peer-group - neighbor FABRIC remote-as external - neighbor FABRIC timers 2 8 - neighbor lan0 interface peer-group FABRIC - neighbor lan1 interface peer-group FABRIC - ! - address-family ipv4 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - exit-address-family - ! - address-family ipv6 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - neighbor FABRIC activate - exit-address-family - ! - address-family l2vpn evpn - neighbor FABRIC activate - advertise-all-vni - exit-address-family -! -router bgp 4200003073 vrf vrf3981 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3983 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3983 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! 
-router bgp 4200003073 vrf vrf3983 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -ip prefix-list vrf3981-import-from-vrf3983 seq 100 deny 10.0.20.2/32 le 32 -ip prefix-list vrf3981-import-from-vrf3983 seq 101 permit 10.0.20.0/22 le 32 -ip prefix-list vrf3981-import-from-vrf3983 permit 0.0.0.0/0 -route-map vrf3981-import-map permit 10 - match source-vrf vrf3983 - match ip address prefix-list vrf3981-import-from-vrf3983 -route-map vrf3981-import-map deny 20 -! -ip prefix-list vrf3983-import-from-vrf3981 seq 100 permit 10.0.16.0/22 le 32 -ip prefix-list vrf3983-import-from-vrf3981 seq 101 permit 10.0.20.0/22 le 32 -route-map vrf3983-import-map permit 10 - match source-vrf vrf3981 - match ip address prefix-list vrf3983-import-from-vrf3981 -route-map vrf3983-import-map deny 20 -! -route-map only-self-out permit 10 - match as-path SELF -route-map only-self-out deny 20 -! -route-map LOOPBACKS permit 10 - match interface lo -! -bgp as-path access-list SELF permit ^$ -! -line vty -! \ No newline at end of file diff --git a/pkg/network/testdata/frr.conf.firewall_dmz_app_storage b/pkg/network/testdata/frr.conf.firewall_dmz_app_storage deleted file mode 100644 index a9c951d..0000000 --- a/pkg/network/testdata/frr.conf.firewall_dmz_app_storage +++ /dev/null @@ -1,159 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -frr version 8.5 -frr defaults datacenter -hostname firewall -! -log syslog debugging -debug bgp updates -debug bgp nht -debug bgp update-groups -debug bgp zebra -! 
-vrf vrf3981 - vni 3981 - exit-vrf -! -vrf vrf3983 - vni 3983 - exit-vrf -! -vrf vrf3982 - vni 3982 - exit-vrf -! -interface lan0 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -interface lan1 - ipv6 nd ra-interval 6 - no ipv6 nd suppress-ra -! -router bgp 4200003073 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - neighbor FABRIC peer-group - neighbor FABRIC remote-as external - neighbor FABRIC timers 2 8 - neighbor lan0 interface peer-group FABRIC - neighbor lan1 interface peer-group FABRIC - ! - address-family ipv4 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - exit-address-family - ! - address-family ipv6 unicast - redistribute connected route-map LOOPBACKS - neighbor FABRIC route-map only-self-out out - neighbor FABRIC activate - exit-address-family - ! - address-family l2vpn evpn - neighbor FABRIC activate - advertise-all-vni - exit-address-family -! -router bgp 4200003073 vrf vrf3981 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3983 - import vrf vrf3982 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3983 - import vrf vrf3982 - import vrf route-map vrf3981-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -router bgp 4200003073 vrf vrf3983 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3983-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! 
-router bgp 4200003073 vrf vrf3982 - bgp router-id 10.1.0.1 - bgp bestpath as-path multipath-relax - ! - address-family ipv4 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3982-import-map - exit-address-family - ! - address-family ipv6 unicast - redistribute connected - import vrf vrf3981 - import vrf route-map vrf3982-import-map - exit-address-family - ! - address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast - exit-address-family -! -ip prefix-list vrf3981-import-from-vrf3983 seq 100 deny 10.0.20.2/32 le 32 -ip prefix-list vrf3981-import-from-vrf3983 seq 101 permit 10.0.20.0/22 le 32 -ip prefix-list vrf3981-import-from-vrf3982 seq 102 permit 10.0.18.0/22 le 32 -ip prefix-list vrf3981-import-from-vrf3983 permit 0.0.0.0/0 -route-map vrf3981-import-map permit 10 - match source-vrf vrf3983 - match ip address prefix-list vrf3981-import-from-vrf3983 -route-map vrf3981-import-map permit 20 - match source-vrf vrf3982 - match ip address prefix-list vrf3981-import-from-vrf3982 -route-map vrf3981-import-map deny 30 -! -ip prefix-list vrf3983-import-from-vrf3981 seq 100 permit 10.0.16.0/22 le 32 -ip prefix-list vrf3983-import-from-vrf3981 seq 101 permit 10.0.20.0/22 le 32 -route-map vrf3983-import-map permit 10 - match source-vrf vrf3981 - match ip address prefix-list vrf3983-import-from-vrf3981 -route-map vrf3983-import-map deny 20 -! -ip prefix-list vrf3982-import-from-vrf3981 seq 100 permit 10.0.16.0/22 le 32 -ip prefix-list vrf3982-import-from-vrf3981 seq 101 permit 10.0.18.0/22 le 32 -route-map vrf3982-import-map permit 10 - match source-vrf vrf3981 - match ip address prefix-list vrf3982-import-from-vrf3981 -route-map vrf3982-import-map deny 20 -! -route-map only-self-out permit 10 - match as-path SELF -route-map only-self-out deny 20 -! -route-map LOOPBACKS permit 10 - match interface lo -! -bgp as-path access-list SELF permit ^$ -! -line vty -! 
\ No newline at end of file diff --git a/pkg/network/testdata/hostname b/pkg/network/testdata/hostname deleted file mode 100644 index d565e06..0000000 --- a/pkg/network/testdata/hostname +++ /dev/null @@ -1 +0,0 @@ -firewall \ No newline at end of file diff --git a/pkg/network/testdata/hosts b/pkg/network/testdata/hosts deleted file mode 100644 index aed7680..0000000 --- a/pkg/network/testdata/hosts +++ /dev/null @@ -1,4 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -127.0.0.1 localhost -10.0.16.2 firewall diff --git a/pkg/network/testdata/machine.yaml b/pkg/network/testdata/machine.yaml deleted file mode 100644 index dd4fddb..0000000 --- a/pkg/network/testdata/machine.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -hostname: machine -networks: - # === Tenant Network (private=true) - # [IGNORED] - - asn: 4200003073 - # [IGNORED in case of private network] - destinationprefixes: [] - # For Machine: Used to set the loopback ips. - ips: - - 10.0.17.2 - # [IGNORED in case of private network] - nat: false - # [IGNORED in case of private network] - networkid: bc830818-2df1-4904-8c40-4322296d393d - # considered as source range for nat and to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 10.0.16.0/22 - private: true - # [IGNORED in case of private network] - underlay: false - networktype: privateprimaryunshared - # Defines the tenant VRF id. - vrf: 3981 - # === Public networks to route to - # [IGNORED] - - asn: 4200003073 - # Considered to establish static route leak to reach out from tenant VRF into the public networks. - destinationprefixes: - - 0.0.0.0/0 - # For Machine: Used to set the loopback ips. - ips: - - 185.1.2.3 - # In case nat equals true, Source NAT via SVI is added. 
- nat: true - networkid: internet-vagrant-lab - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 185.1.2.0/24 - - 185.27.0.0/22 - private: false - underlay: false - networktype: external - # VRF id considered to define EVPN interfaces. - vrf: 104009 - - asn: 4200003073 - # considered to figure out allowed prefixes for route imports from public network into tenant network - destinationprefixes: - - 100.127.1.0/24 - # For Machine: Used to set the loopback ips. - ips: - - 100.127.129.1 - nat: true - networkid: mpls-nbg-w8101-test - # considered to figure out allowed prefixes for route imports from private network into non-private, non-underlay network - prefixes: - - 100.127.129.0/24 - private: false - underlay: false - networktype: external - vrf: 104010 -machineuuid: e0ab02d2-27cd-5a5e-8efc-080ba80cf258 -# [IGNORED] -sshpublickey: "" -# [IGNORED] -password: KAWT5DugqSPAezMl -# [IGNORED] -devmode: false -# [IGNORED] -console: ttyS1,115200n8 -timestamp: "2019-07-01T09:41:43Z" -nics: - - mac: "00:03:00:11:11:01" - name: lan0 - neighbors: - - mac: 44:38:39:00:00:1a - name: null - neighbors: [] - - mac: "00:03:00:11:12:01" - name: lan1 - neighbors: - - mac: "44:38:39:00:00:04" - name: null - neighbors: [] diff --git a/pkg/network/testdata/networkd/firewall/10-lan0.link b/pkg/network/testdata/networkd/firewall/10-lan0.link deleted file mode 100644 index 6b00713..0000000 --- a/pkg/network/testdata/networkd/firewall/10-lan0.link +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
-[Match] -PermanentMACAddress=00:03:00:11:11:01 - -[Link] -Name=lan0 -NamePolicy= -MTUBytes=9216 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/10-lan0.network b/pkg/network/testdata/networkd/firewall/10-lan0.network deleted file mode 100644 index 1232fed..0000000 --- a/pkg/network/testdata/networkd/firewall/10-lan0.network +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[Match] -Name=lan0 - -[Network] -IPv6AcceptRA=no -VXLAN=vni3981 -VXLAN=vni3982 -VXLAN=vni104009 -VXLAN=vni104010 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/11-lan1.link b/pkg/network/testdata/networkd/firewall/11-lan1.link deleted file mode 100644 index 348f26f..0000000 --- a/pkg/network/testdata/networkd/firewall/11-lan1.link +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[Match] -PermanentMACAddress=00:03:00:11:12:01 - -[Link] -Name=lan1 -NamePolicy= -MTUBytes=9216 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/11-lan1.network b/pkg/network/testdata/networkd/firewall/11-lan1.network deleted file mode 100644 index a17badb..0000000 --- a/pkg/network/testdata/networkd/firewall/11-lan1.network +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
-[Match] -Name=lan1 - -[Network] -IPv6AcceptRA=no -VXLAN=vni3981 -VXLAN=vni3982 -VXLAN=vni104009 -VXLAN=vni104010 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/20-bridge.netdev b/pkg/network/testdata/networkd/firewall/20-bridge.netdev deleted file mode 100644 index 186b556..0000000 --- a/pkg/network/testdata/networkd/firewall/20-bridge.netdev +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[NetDev] -Name=bridge -Kind=bridge -MTUBytes=9000 - -[Bridge] -DefaultPVID=none -VLANFiltering=yes diff --git a/pkg/network/testdata/networkd/firewall/30-svi-3981.netdev b/pkg/network/testdata/networkd/firewall/30-svi-3981.netdev deleted file mode 100644 index 1c4cb0b..0000000 --- a/pkg/network/testdata/networkd/firewall/30-svi-3981.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# svi (networkid: bc830818-2df1-4904-8c40-4322296d393d) -[NetDev] -Name=vlan3981 -Kind=vlan - -[VLAN] -Id=1000 diff --git a/pkg/network/testdata/networkd/firewall/30-vrf-3981.netdev b/pkg/network/testdata/networkd/firewall/30-vrf-3981.netdev deleted file mode 100644 index f0ef0cd..0000000 --- a/pkg/network/testdata/networkd/firewall/30-vrf-3981.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# vrf (networkid: bc830818-2df1-4904-8c40-4322296d393d) -[NetDev] -Name=vrf3981 -Kind=vrf - -[VRF] -Table=1000 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/30-vrf-3981.network b/pkg/network/testdata/networkd/firewall/30-vrf-3981.network deleted file mode 100644 index 05a2c16..0000000 --- a/pkg/network/testdata/networkd/firewall/30-vrf-3981.network +++ /dev/null @@ -1,3 +0,0 @@ -# vrf (networkid: bc830818-2df1-4904-8c40-4322296d393d) -[Match] -Name=vrf3981 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/31-svi-3982.netdev b/pkg/network/testdata/networkd/firewall/31-svi-3982.netdev deleted file mode 100644 index c82b275..0000000 --- 
a/pkg/network/testdata/networkd/firewall/31-svi-3982.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# svi (networkid: storage-net) -[NetDev] -Name=vlan3982 -Kind=vlan - -[VLAN] -Id=1001 diff --git a/pkg/network/testdata/networkd/firewall/31-vrf-3982.netdev b/pkg/network/testdata/networkd/firewall/31-vrf-3982.netdev deleted file mode 100644 index 0005eb6..0000000 --- a/pkg/network/testdata/networkd/firewall/31-vrf-3982.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# vrf (networkid: storage-net) -[NetDev] -Name=vrf3982 -Kind=vrf - -[VRF] -Table=1001 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/31-vrf-3982.network b/pkg/network/testdata/networkd/firewall/31-vrf-3982.network deleted file mode 100644 index f328ca1..0000000 --- a/pkg/network/testdata/networkd/firewall/31-vrf-3982.network +++ /dev/null @@ -1,3 +0,0 @@ -# vrf (networkid: storage-net) -[Match] -Name=vrf3982 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/32-vrf-104009.netdev b/pkg/network/testdata/networkd/firewall/32-vrf-104009.netdev deleted file mode 100644 index f81e30f..0000000 --- a/pkg/network/testdata/networkd/firewall/32-vrf-104009.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# vrf (networkid: internet-vagrant-lab) -[NetDev] -Name=vrf104009 -Kind=vrf - -[VRF] -Table=1002 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/32-vrf-104009.network b/pkg/network/testdata/networkd/firewall/32-vrf-104009.network deleted file mode 100644 index 760c0a2..0000000 --- a/pkg/network/testdata/networkd/firewall/32-vrf-104009.network +++ /dev/null @@ -1,3 +0,0 @@ -# vrf (networkid: internet-vagrant-lab) -[Match] -Name=vrf104009 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/33-vrf-104010.netdev b/pkg/network/testdata/networkd/firewall/33-vrf-104010.netdev deleted file mode 100644 index 0d851b6..0000000 --- a/pkg/network/testdata/networkd/firewall/33-vrf-104010.netdev +++ /dev/null @@ -1,7 +0,0 @@ -# vrf 
(networkid: mpls-nbg-w8101-test) -[NetDev] -Name=vrf104010 -Kind=vrf - -[VRF] -Table=1004 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/firewall/33-vrf-104010.network b/pkg/network/testdata/networkd/firewall/33-vrf-104010.network deleted file mode 100644 index ffe489c..0000000 --- a/pkg/network/testdata/networkd/firewall/33-vrf-104010.network +++ /dev/null @@ -1,3 +0,0 @@ -# vrf (networkid: mpls-nbg-w8101-test) -[Match] -Name=vrf104010 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/machine/10-lan0.link b/pkg/network/testdata/networkd/machine/10-lan0.link deleted file mode 100644 index 498c09d..0000000 --- a/pkg/network/testdata/networkd/machine/10-lan0.link +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[Match] -PermanentMACAddress=00:03:00:11:11:01 - -[Link] -Name=lan0 -NamePolicy= -MTUBytes=9000 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/machine/10-lan0.network b/pkg/network/testdata/networkd/machine/10-lan0.network deleted file mode 100644 index 74c29ad..0000000 --- a/pkg/network/testdata/networkd/machine/10-lan0.network +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[Match] -Name=lan0 - -[Network] -IPv6AcceptRA=no \ No newline at end of file diff --git a/pkg/network/testdata/networkd/machine/11-lan1.link b/pkg/network/testdata/networkd/machine/11-lan1.link deleted file mode 100644 index 5d15b91..0000000 --- a/pkg/network/testdata/networkd/machine/11-lan1.link +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
-[Match] -PermanentMACAddress=00:03:00:11:12:01 - -[Link] -Name=lan1 -NamePolicy= -MTUBytes=9000 \ No newline at end of file diff --git a/pkg/network/testdata/networkd/machine/11-lan1.network b/pkg/network/testdata/networkd/machine/11-lan1.network deleted file mode 100644 index 79a6cab..0000000 --- a/pkg/network/testdata/networkd/machine/11-lan1.network +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -[Match] -Name=lan1 - -[Network] -IPv6AcceptRA=no \ No newline at end of file diff --git a/pkg/network/testdata/nftrules_dmz b/pkg/network/testdata/nftrules_dmz deleted file mode 100644 index a609824..0000000 --- a/pkg/network/testdata/nftrules_dmz +++ /dev/null @@ -1,91 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. -table inet metal { - chain input { - type filter hook input priority 0; policy drop; - meta l4proto ipv6-icmp counter accept comment "icmpv6 input required for neighbor discovery" - iifname "lo" counter accept comment "BGP unnumbered" - iifname "lan0" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered input from lan0" - iifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered input from lan1" - iifname "lan0" ip saddr 10.0.0.0/8 udp dport 4789 counter accept comment "incoming VXLAN lan0" - iifname "lan1" ip saddr 10.0.0.0/8 udp dport 4789 counter accept comment "incoming VXLAN lan1" - - ct state established,related counter accept comment "stateful input" - - ip saddr 10.0.0.0/8 tcp dport domain ip daddr 185.1.2.3 accept comment "dnat to dns proxy" - ip saddr 10.0.0.0/8 udp dport domain ip daddr 185.1.2.3 accept comment "dnat to dns proxy" - - tcp dport ssh ct state new counter accept comment "SSH incoming connections" - iifname "vrf3981" tcp dport 9100 counter accept comment "node metrics" - iifname "vrf3981" tcp dport 9630 
counter accept comment "nftables metrics" - iifname "vrf3983" tcp dport 9100 counter accept comment "node metrics" - iifname "vrf3983" tcp dport 9630 counter accept comment "nftables metrics" - - ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" - counter jump refuse - } - chain forward { - type filter hook forward priority 0; policy drop; - ct state invalid counter drop comment "drop invalid packets from forwarding to prevent malicious activity" - ct state established,related counter accept comment "stateful forward" - tcp dport bgp ct state new counter jump refuse comment "block bgp forward to machines" - limit rate 2/minute counter log prefix "nftables-metal-dropped: " - } - chain output { - type filter hook output priority 0; policy accept; - meta l4proto ipv6-icmp counter accept comment "icmpv6 output required for neighbor discovery" - oifname "lo" counter accept comment "lo output required e.g. for chrony" - oifname "lan0" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan0" - oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" - - ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - - ct state established,related counter accept comment "stateful output" - ct state invalid counter drop comment "drop invalid packets" - } - chain output_ct { - type filter hook output priority raw; policy accept; - oifname "vlan3981" tcp sport domain ct zone set 3 - oifname "vlan3981" udp sport domain ct zone set 3 - oifname "vlan3983" tcp sport domain ct zone set 3 - oifname "vlan3983" udp sport domain ct zone set 3 - } - chain refuse { - limit rate 2/minute counter log prefix "nftables-metal-dropped: " - counter drop - } -} -table inet nat { - set proxy_dns_servers { - type ipv4_addr - flags interval - auto-merge - elements = { 8.8.8.8, 8.8.4.4, 1.1.1.1, 1.0.0.1 } - } - - chain prerouting { - type nat hook prerouting priority 
0; policy accept; - ip daddr @proxy_dns_servers iifname "vlan3981" tcp dport domain dnat ip to 185.1.2.3 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3981" udp dport domain dnat ip to 185.1.2.3 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3983" tcp dport domain dnat ip to 185.1.2.3 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3983" udp dport domain dnat ip to 185.1.2.3 comment "dnat to dns proxy" - } - chain prerouting_ct { - type filter hook prerouting priority raw; policy accept; - iifname "vlan3981" tcp dport domain ct zone set 3 - iifname "vlan3981" udp dport domain ct zone set 3 - iifname "vlan3983" tcp dport domain ct zone set 3 - iifname "vlan3983" udp dport domain ct zone set 3 - } - chain input { - type nat hook input priority 0; policy accept; - } - chain output { - type nat hook output priority 0; policy accept; - } - chain postrouting { - type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip saddr 10.0.16.0/22 ip daddr != 185.1.2.3 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104009" ip saddr 10.0.20.0/22 ip daddr != 185.1.2.3 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - } -} \ No newline at end of file diff --git a/pkg/network/testdata/nftrules_dmz_app b/pkg/network/testdata/nftrules_dmz_app deleted file mode 100644 index 83bee38..0000000 --- a/pkg/network/testdata/nftrules_dmz_app +++ /dev/null @@ -1,89 +0,0 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
-table inet metal { - chain input { - type filter hook input priority 0; policy drop; - meta l4proto ipv6-icmp counter accept comment "icmpv6 input required for neighbor discovery" - iifname "lo" counter accept comment "BGP unnumbered" - iifname "lan0" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered input from lan0" - iifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered input from lan1" - iifname "lan0" ip saddr 10.0.0.0/8 udp dport 4789 counter accept comment "incoming VXLAN lan0" - iifname "lan1" ip saddr 10.0.0.0/8 udp dport 4789 counter accept comment "incoming VXLAN lan1" - - ct state established,related counter accept comment "stateful input" - - ip saddr 10.0.0.0/8 tcp dport domain ip daddr 10.0.20.2 accept comment "dnat to dns proxy" - ip saddr 10.0.0.0/8 udp dport domain ip daddr 10.0.20.2 accept comment "dnat to dns proxy" - - tcp dport ssh ct state new counter accept comment "SSH incoming connections" - iifname "vrf3981" tcp dport 9100 counter accept comment "node metrics" - iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" - iifname "vrf3983" tcp dport 9100 counter accept comment "node metrics" - iifname "vrf3983" tcp dport 9630 counter accept comment "nftables metrics" - - ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" - counter jump refuse - } - chain forward { - type filter hook forward priority 0; policy drop; - ct state invalid counter drop comment "drop invalid packets from forwarding to prevent malicious activity" - ct state established,related counter accept comment "stateful forward" - tcp dport bgp ct state new counter jump refuse comment "block bgp forward to machines" - limit rate 2/minute counter log prefix "nftables-metal-dropped: " - } - chain output { - type filter hook output priority 0; policy accept; - meta l4proto ipv6-icmp counter accept comment "icmpv6 output required for neighbor discovery" - oifname 
"lo" counter accept comment "lo output required e.g. for chrony" - oifname "lan0" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan0" - oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" - - ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - - ct state established,related counter accept comment "stateful output" - ct state invalid counter drop comment "drop invalid packets" - } - chain output_ct { - type filter hook output priority raw; policy accept; - oifname "vlan3981" tcp sport domain ct zone set 3 - oifname "vlan3981" udp sport domain ct zone set 3 - oifname "vlan3983" tcp sport domain ct zone set 3 - oifname "vlan3983" udp sport domain ct zone set 3 - } - chain refuse { - limit rate 2/minute counter log prefix "nftables-metal-dropped: " - counter drop - } -} -table inet nat { - set proxy_dns_servers { - type ipv4_addr - flags interval - auto-merge - elements = { 8.8.8.8, 8.8.4.4, 1.1.1.1, 1.0.0.1 } - } - - chain prerouting { - type nat hook prerouting priority 0; policy accept; - ip daddr @proxy_dns_servers iifname "vlan3981" tcp dport domain dnat ip to 10.0.20.2 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3981" udp dport domain dnat ip to 10.0.20.2 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3983" tcp dport domain dnat ip to 10.0.20.2 comment "dnat to dns proxy" - ip daddr @proxy_dns_servers iifname "vlan3983" udp dport domain dnat ip to 10.0.20.2 comment "dnat to dns proxy" - } - chain prerouting_ct { - type filter hook prerouting priority raw; policy accept; - iifname "vlan3981" tcp dport domain ct zone set 3 - iifname "vlan3981" udp dport domain ct zone set 3 - iifname "vlan3983" tcp dport domain ct zone set 3 - iifname "vlan3983" udp dport domain ct zone set 3 - } - chain input { - type nat hook input priority 0; policy accept; - } - chain output { - type nat hook output priority 0; 
policy accept; - } - chain postrouting { - type nat hook postrouting priority 0; policy accept; - } -} \ No newline at end of file diff --git a/pkg/network/tpl/hostname.tpl b/pkg/network/tpl/hostname.tpl deleted file mode 100644 index ffce2f3..0000000 --- a/pkg/network/tpl/hostname.tpl +++ /dev/null @@ -1,2 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.HostnameData*/ -}} -{{ .Hostname }} \ No newline at end of file diff --git a/pkg/network/tpl/hosts.tpl b/pkg/network/tpl/hosts.tpl deleted file mode 100644 index 820aad7..0000000 --- a/pkg/network/tpl/hosts.tpl +++ /dev/null @@ -1,4 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.HostsData*/ -}} -{{ .Comment }} -127.0.0.1 localhost -{{ .IP }} {{ .Hostname }} diff --git a/pkg/network/tpl/networkd/00-lo.network.tpl b/pkg/network/tpl/networkd/00-lo.network.tpl deleted file mode 100644 index 5e4d39a..0000000 --- a/pkg/network/tpl/networkd/00-lo.network.tpl +++ /dev/null @@ -1,12 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.IfacesData*/ -}} -{{ .Loopback.Comment }} -[Match] -Name=lo - -[Address] -Address=127.0.0.1/8 -{{- range .Loopback.IPs }} - -[Address] -Address={{ . 
}} -{{- end }} \ No newline at end of file diff --git a/pkg/network/tpl/networkd/10-lan.link.tpl b/pkg/network/tpl/networkd/10-lan.link.tpl deleted file mode 100644 index 476786b..0000000 --- a/pkg/network/tpl/networkd/10-lan.link.tpl +++ /dev/null @@ -1,9 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.SystemdLinkData*/ -}} -{{ .Comment }} -[Match] -PermanentMACAddress={{ .MAC }} - -[Link] -Name=lan{{ .Index }} -NamePolicy= -MTUBytes={{ .MTU }} \ No newline at end of file diff --git a/pkg/network/tpl/networkd/10-lan.network.tpl b/pkg/network/tpl/networkd/10-lan.network.tpl deleted file mode 100644 index 73fb471..0000000 --- a/pkg/network/tpl/networkd/10-lan.network.tpl +++ /dev/null @@ -1,10 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.IfacesData*/ -}} -{{ .Comment }} -[Match] -Name=lan{{ .Index }} - -[Network] -IPv6AcceptRA=no -{{- range .EVPNIfaces }} -VXLAN=vni{{ .VXLAN.ID }} -{{- end }} \ No newline at end of file diff --git a/pkg/network/tpl/networkd/20-bridge.netdev.tpl b/pkg/network/tpl/networkd/20-bridge.netdev.tpl deleted file mode 100644 index 2fef44c..0000000 --- a/pkg/network/tpl/networkd/20-bridge.netdev.tpl +++ /dev/null @@ -1,10 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.IfacesData*/ -}} -{{ .Comment }} -[NetDev] -Name=bridge -Kind=bridge -MTUBytes=9000 - -[Bridge] -DefaultPVID=none -VLANFiltering=yes diff --git a/pkg/network/tpl/networkd/20-bridge.network.tpl b/pkg/network/tpl/networkd/20-bridge.network.tpl deleted file mode 100644 index 360b48c..0000000 --- a/pkg/network/tpl/networkd/20-bridge.network.tpl +++ /dev/null @@ -1,14 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.IfacesData*/ -}} -{{ .Comment }} -[Match] -Name=bridge - -[Network] -{{- range .EVPNIfaces }} -VLAN=vlan{{ .VRF.ID }} -{{- end }} -{{- range .EVPNIfaces }} - -[BridgeVLAN] -VLAN={{ .SVI.VLANID }} -{{- end }} \ No newline at end of file diff --git 
a/pkg/network/tpl/networkd/30-svi.netdev.tpl b/pkg/network/tpl/networkd/30-svi.netdev.tpl deleted file mode 100644 index 6aa6826..0000000 --- a/pkg/network/tpl/networkd/30-svi.netdev.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .SVI.Comment }} -[NetDev] -Name=vlan{{ .VRF.ID }} -Kind=vlan - -[VLAN] -Id={{ .SVI.VLANID }} diff --git a/pkg/network/tpl/networkd/30-svi.network.tpl b/pkg/network/tpl/networkd/30-svi.network.tpl deleted file mode 100644 index 0ef4c10..0000000 --- a/pkg/network/tpl/networkd/30-svi.network.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .SVI.Comment }} -[Match] -Name=vlan{{ .VRF.ID }} - -[Link] -MTUBytes=9000 - -[Network] -VRF=vrf{{ .VRF.ID }} -{{- range .SVI.Addresses }} -Address={{ . }} -{{- end }} diff --git a/pkg/network/tpl/networkd/30-vrf.netdev.tpl b/pkg/network/tpl/networkd/30-vrf.netdev.tpl deleted file mode 100644 index 282a910..0000000 --- a/pkg/network/tpl/networkd/30-vrf.netdev.tpl +++ /dev/null @@ -1,8 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .VRF.Comment }} -[NetDev] -Name=vrf{{ .VRF.ID }} -Kind=vrf - -[VRF] -Table={{ .VRF.Table }} \ No newline at end of file diff --git a/pkg/network/tpl/networkd/30-vrf.network.tpl b/pkg/network/tpl/networkd/30-vrf.network.tpl deleted file mode 100644 index a7628dc..0000000 --- a/pkg/network/tpl/networkd/30-vrf.network.tpl +++ /dev/null @@ -1,4 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .VRF.Comment }} -[Match] -Name=vrf{{ .VRF.ID }} \ No newline at end of file diff --git a/pkg/network/tpl/networkd/30-vxlan.netdev.tpl b/pkg/network/tpl/networkd/30-vxlan.netdev.tpl deleted file mode 100644 index 68ebf9b..0000000 --- a/pkg/network/tpl/networkd/30-vxlan.netdev.tpl +++ /dev/null @@ -1,12 +0,0 @@ -{{- /*gotype: 
github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .VXLAN.Comment }} -[NetDev] -Name=vni{{ .VXLAN.ID }} -Kind=vxlan - -[VXLAN] -VNI={{ .VXLAN.ID }} -Local={{ .VXLAN.TunnelIP }} -UDPChecksum=true -MacLearning=false -DestinationPort=4789 diff --git a/pkg/network/tpl/networkd/30-vxlan.network.tpl b/pkg/network/tpl/networkd/30-vxlan.network.tpl deleted file mode 100644 index a49f111..0000000 --- a/pkg/network/tpl/networkd/30-vxlan.network.tpl +++ /dev/null @@ -1,14 +0,0 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.EVPNIface*/ -}} -{{ .VXLAN.Comment }} -[Match] -Name=vni{{ .VXLAN.ID }} - -[Link] -MTUBytes=9000 - -[Network] -Bridge=bridge - -[BridgeVLAN] -PVID={{ .SVI.VLANID }} -EgressUntagged={{ .SVI.VLANID }} diff --git a/pkg/nftables/nftables.go b/pkg/nftables/nftables.go new file mode 100644 index 0000000..d02cd66 --- /dev/null +++ b/pkg/nftables/nftables.go @@ -0,0 +1,393 @@ +package nftables + +import ( + "context" + "fmt" + "log/slog" + "net/netip" + "os/exec" + "strconv" + "strings" + + "github.com/metal-stack/api/go/enum" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + // nftables system service name + serviceName = "nftables.service" + // nftables rules file + nftrulesPath = "/etc/nftables/rules" + + // Set up additional conntrack zone for DNS traffic. + // There was a problem that duplicate packets were registered by conntrack + // when packet was leaking from private VRF to the internet VRF. + // Isolating traffic to special zone solves the problem. + // Zone number(3) was obtained by experiments. 
+ dnsProxyZone = "3" + dnsPort = "domain" + + // ForwardPolicyDrop drops packets which try to go through the forwarding chain + ForwardPolicyDrop = ForwardPolicy("drop") + // ForwardPolicyAccept accepts packets which try to go through the forwarding chain + ForwardPolicyAccept = ForwardPolicy("accept") +) + +var ( + //go:embed nftrules.tpl + templateString string +) + +type ( + Config struct { + Log *slog.Logger + Reload bool + Validate bool + + Network *network.Network + + EnableDNSProxy bool + ForwardPolicy ForwardPolicy + + fs afero.Fs + } + + // ForwardPolicy defines how packets in the forwarding chain are handled, can be either drop or accept. + // drop will be the standard for firewalls which are not managed by kubernetes resources (CWNPs) + ForwardPolicy string + + // NftablesData represents the information required to render nftables configuration. + NftablesData struct { + Comment string + SNAT []snat + DNSProxyDNAT dnat + VPN bool + ForwardPolicy string + FirewallRules *firewallRules + Input input + } + + input struct { + InInterfaces []string + } + + firewallRules struct { + Egress []string + Ingress []string + } + + // snat holds the information required to configure Source NAT. + snat struct { + Comment string + OutInterface string + OutIntSpec addrSpec + SourceSpecs []addrSpec + } + + // dnat holds the information required to configure dnat. 
+ dnat struct { + Comment string + InInterfaces []string + SAddr string + DAddr string + Port string + Zone string + DestSpec addrSpec + } + + addrSpec struct { + AddressFamily string + Address string + } +) + +// Renders renders nftables rules according to the given input data and reloads the service if necessary +func Render(ctx context.Context, cfg *Config) (changed bool, err error) { + cfg.Log.Debug("render nftables configuration") + const comment = "generated by os-installer" + + snat, err := getSNAT(cfg) + if err != nil { + return false, err + } + + firewallRules, err := getFirewallRules(cfg) + if err != nil { + return false, err + } + + data := NftablesData{ + Comment: comment, + SNAT: snat, + ForwardPolicy: string(cfg.ForwardPolicy), + FirewallRules: firewallRules, + Input: getInput(cfg), + VPN: cfg.Network.HasVpn(), + } + + if cfg.EnableDNSProxy { + data.DNSProxyDNAT = getDNSProxyDNAT(cfg) + } + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: templateString, + Data: data, + Fs: cfg.fs, + Validate: func(path string) error { + if !cfg.Validate { + return nil + } + return validate(cfg, path) + }, + }) + if err != nil { + return false, err + } + + changed, err = r.Render(ctx, nftrulesPath) + if err != nil { + return changed, err + } + + if cfg.Reload && changed { + if err := systemd_renderer.Reload(ctx, cfg.Log, serviceName); err != nil { + return changed, err + } + } + + return +} + +func getInput(cfg *Config) input { + input := input{} + for _, n := range cfg.Network.AllocationNetworks() { + switch n.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_CHILD, apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED: + input.InInterfaces = append(input.InInterfaces, fmt.Sprintf("vrf%d", n.Vrf)) + } + } + return input +} + +func getSNAT(cfg *Config) ([]snat, error) { + var ( + result []snat + defaultNetwork *apiv2.MachineNetwork + defaultAF string + ) + + defaultNetwork, err := cfg.Network.GetDefaultRouteNetwork() + if err != nil { + return nil, 
err + } + defaultNetworkName := fmt.Sprintf("vrf%d", defaultNetwork.Vrf) + + ip, _ := netip.ParseAddr(defaultNetwork.Ips[0]) + defaultAF = "ip" + if ip.Is6() { + defaultAF = "ip6" + } + + privatePrimaryPrefixes, err := cfg.Network.PrivatePrimaryNetworksPrefixes() + if err != nil { + return nil, err + } + + for _, n := range cfg.Network.AllocationNetworks() { + switch n.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, apiv2.NetworkType_NETWORK_TYPE_SUPER, apiv2.NetworkType_NETWORK_TYPE_SUPER_NAMESPACED: + continue + } + + if n.NatType != apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE { + continue + } + + cfg.Log.Debug("getSNAT", "network", n.Network) + var ( + sources []addrSpec + cmt = fmt.Sprintf("snat (networkid: %s)", n.Network) + svi = fmt.Sprintf("vlan%d", n.Vrf) + vrf = fmt.Sprintf("vrf%d", n.Vrf) + ) + + for _, pfx := range privatePrimaryPrefixes { + af, err := getAddressFamily(pfx) + if err != nil { + return nil, fmt.Errorf("unable to determine address family: %w", err) + } + + sources = append(sources, addrSpec{ + Address: pfx, + AddressFamily: af, + }) + cfg.Log.Debug("getSNAT", "network", n.Network, "prefixes", pfx, "af", af) + } + + s := snat{ + Comment: cmt, + OutInterface: svi, + SourceSpecs: sources, + } + + if cfg.EnableDNSProxy && (vrf == defaultNetworkName) { + s.OutIntSpec = addrSpec{ + AddressFamily: defaultAF, + Address: defaultNetwork.Ips[0], + } + } + + result = append(result, s) + } + + return result, nil +} + +func getDNSProxyDNAT(cfg *Config) dnat { + svis := []string{} + for _, n := range cfg.Network.AllocationNetworks() { + switch n.NetworkType { + case apiv2.NetworkType_NETWORK_TYPE_CHILD, apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED: + svi := fmt.Sprintf("vlan%d", n.Vrf) + svis = append(svis, svi) + } + + } + + n, err := cfg.Network.GetDefaultRouteNetwork() + if err != nil { + return dnat{} + } + + ip, _ := netip.ParseAddr(n.Ips[0]) + af := "ip" + saddr := "10.0.0.0/8" + daddr := "@proxy_dns_servers" + if ip.Is6() { + af = "ip6" + 
saddr = "fd00::/8" + daddr = "@proxy_dns_servers_v6" + } + return dnat{ + Comment: "dnat to dns proxy", + InInterfaces: svis, + SAddr: saddr, + DAddr: daddr, + Port: dnsPort, + Zone: dnsProxyZone, + DestSpec: addrSpec{ + AddressFamily: af, + Address: n.Ips[0], + }, + } +} + +func getFirewallRules(cfg *Config) (*firewallRules, error) { + if cfg.Network.FirewallRules() == nil { + return &firewallRules{}, nil + } + var ( + egressRules = []string{"# egress rules specified during firewall creation"} + ingressRules = []string{"# ingress rules specified during firewall creation"} + inputInterfaces = getInput(cfg) + quotedInputInterfaces []string + ) + for _, i := range inputInterfaces.InInterfaces { + quotedInputInterfaces = append(quotedInputInterfaces, "\""+i+"\"") + } + + for _, r := range cfg.Network.FirewallRules().Egress { + ports := make([]string, len(r.Ports)) + for i, v := range r.Ports { + ports[i] = strconv.Itoa(int(v)) + } + for _, daddr := range r.To { + af, err := getAddressFamily(daddr) + if err != nil { + return nil, err + } + protocolString, err := enum.GetStringValue(r.Protocol) + if err != nil { + return nil, err + } + egressRules = append(egressRules, + fmt.Sprintf("iifname { %s } %s daddr %s %s dport { %s } counter accept comment %q", strings.Join(quotedInputInterfaces, ","), af, daddr, strings.ToLower(*protocolString), strings.Join(ports, ","), r.Comment)) + } + } + + privatePrimaryNetwork, err := cfg.Network.PrivatePrimaryNetwork() + if err != nil { + return nil, err + } + outputInterfacenames := fmt.Sprintf("oifname { \"vrf%d\", \"vni%d\", \"vlan%d\" }", privatePrimaryNetwork.Vrf, privatePrimaryNetwork.Vrf, privatePrimaryNetwork.Vrf) + + for _, r := range cfg.Network.FirewallRules().Ingress { + ports := make([]string, len(r.Ports)) + for i, v := range r.Ports { + ports[i] = strconv.Itoa(int(v)) + } + destinationSpec := "" + if len(r.To) > 0 { + af, err := getAddressFamily(r.To[0]) // To is validated to contain no mixed addressfamilies in metal-api 
+ if err != nil { + continue + } + destinationSpec = fmt.Sprintf("%s daddr { %s }", af, strings.Join(r.To, ", ")) + } else if outputInterfacenames != "" { + destinationSpec = outputInterfacenames + } else { + cfg.Log.Warn("no to address specified but not private primary network present, skipping this rule", "rule", r) + continue + } + + for _, saddr := range r.From { + af, err := getAddressFamily(saddr) + if err != nil { + return nil, err + } + protocolString, err := enum.GetStringValue(r.Protocol) + if err != nil { + return nil, err + } + ingressRules = append(ingressRules, fmt.Sprintf("%s %s saddr %s %s dport { %s } counter accept comment %q", destinationSpec, af, saddr, strings.ToLower(*protocolString), strings.Join(ports, ","), r.Comment)) + } + } + return &firewallRules{ + Egress: egressRules, + Ingress: ingressRules, + }, nil +} + +func getAddressFamily(p string) (string, error) { + prefix, err := netip.ParsePrefix(p) + if err != nil { + return "", err + } + + family := "ip" + if prefix.Addr().Is6() { + family = "ip6" + } + + return family, nil +} + +// validate validates network interfaces configuration. 
+func validate(cfg *Config, path string) error { + cfg.Log.Debug("running 'nft --check --file' to validate changes.", "file", path) + + cmd := exec.Command("nft", "--check", "--file", path) + out, err := cmd.CombinedOutput() + if err != nil { + cfg.Log.Error("nft validation failed", "output", string(out), "error", err) + return err + } + return nil +} diff --git a/pkg/nftables/nftables_test.go b/pkg/nftables/nftables_test.go new file mode 100644 index 0000000..ef5e7fa --- /dev/null +++ b/pkg/nftables/nftables_test.go @@ -0,0 +1,378 @@ +package nftables + +import ( + "embed" + "log/slog" + "path" + "testing" + + "github.com/google/go-cmp/cmp" + apiv2 "github.com/metal-stack/api/go/metalstack/api/v2" + "github.com/metal-stack/os-installer/pkg/network" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + //go:embed test + expectedNftableFiles embed.FS + + firewallAllocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/22"}, + Ips: 
[]string{"100.127.129.1"}, + Vrf: 104010, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + } + + firewallWithVPNAllocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 3981, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + // NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/22"}, + Ips: []string{"100.127.129.1"}, + Vrf: 104010, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + Vpn: &apiv2.MachineVPN{ + ControlPlaneAddress: "https://test.test.dev", + AuthKey: "abracadabra", + }, + } + firewallWithRulesAllocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"10.0.16.0/22"}, + Ips: []string{"10.0.16.2"}, + Vrf: 
3981, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + // NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/22"}, + Ips: []string{"100.127.129.1"}, + Vrf: 104010, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + FirewallRules: &apiv2.FirewallRules{ + Egress: []*apiv2.FirewallEgressRule{ + { + Comment: "allow apt update", + Protocol: apiv2.IPProtocol_IP_PROTOCOL_TCP, + Ports: []uint32{443}, + To: []string{"0.0.0.0/0", "1.2.3.4/32"}, + }, + { + Comment: "allow apt update v6", + Protocol: apiv2.IPProtocol_IP_PROTOCOL_TCP, + Ports: []uint32{443}, + To: []string{"::/0"}, + }, + }, + Ingress: []*apiv2.FirewallIngressRule{ + { + Comment: "allow incoming ssh", + Protocol: apiv2.IPProtocol_IP_PROTOCOL_TCP, + Ports: []uint32{22}, + From: []string{"2.3.4.0/24", "192.168.1.0/16"}, + To: []string{"100.1.2.3/32", "100.1.2.4/32"}, + }, + { + Comment: "allow incoming ssh ipv6", + Protocol: apiv2.IPProtocol_IP_PROTOCOL_TCP, + Ports: []uint32{22}, + From: []string{"2001:db8::1/128"}, + To: []string{"2001:db8:0:113::/64"}, + }, + { + Protocol: apiv2.IPProtocol_IP_PROTOCOL_TCP, + Ports: []uint32{80, 443, 8080}, + From: []string{"1.2.3.0/24", "192.168.0.0/16"}, + }, + }, + }, + } + + firewallSharedAllocation = &apiv2.MachineAllocation{ + AllocationType: 
apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Project: "dd429d45-db03-4627-887f-bf7761d376a5", + Networks: []*apiv2.MachineNetwork{ + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + Project: new("dd429d45-db03-4627-887f-bf7761d376a5"), + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"185.1.2.3"}, + DestinationPrefixes: []string{"0.0.0.0/0"}, + Vrf: 104009, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + } + + firewallIPv6Allocation = &apiv2.MachineAllocation{ + AllocationType: apiv2.MachineAllocationType_MACHINE_ALLOCATION_TYPE_FIREWALL, + Networks: []*apiv2.MachineNetwork{ + { + Network: "379d294d-22e8-4aed-82e1-62c6c2f08d6a", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD, + Prefixes: []string{"2002::/64"}, + Ips: []string{"2002::1"}, + Vrf: 3981, + }, + { + Network: "partition-storage", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_CHILD_SHARED, + Prefixes: []string{"10.0.18.0/22"}, + Ips: []string{"10.0.18.2"}, + Vrf: 3982, + }, + { + Network: "internet", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"2a02:c00:20::/45"}, + Ips: []string{"2a02:c00:20::1"}, + DestinationPrefixes: []string{"::/0"}, + Vrf: 104009, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "underlay", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_UNDERLAY, + Ips: []string{"10.1.0.1"}, + }, + { + Network: "mpls", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Prefixes: []string{"100.127.129.0/22"}, + Ips: 
[]string{"100.127.129.1"}, + Vrf: 104010, + NatType: apiv2.NATType_NAT_TYPE_IPV4_MASQUERADE, + }, + { + Network: "internet-v6", + NetworkType: apiv2.NetworkType_NETWORK_TYPE_EXTERNAL, + Ips: []string{"2001::4"}, + }, + }, + } +) + +func TestRender(t *testing.T) { + tests := []struct { + name string + allocation *apiv2.MachineAllocation + enableDNSProxy bool + forwardPolicy ForwardPolicy + wantFilePath string + wantErr error + }{ + { + name: "render firewall, forward drop", + allocation: firewallAllocation, + wantFilePath: "nftrules", + enableDNSProxy: false, + forwardPolicy: ForwardPolicyDrop, + wantErr: nil, + }, + { + name: "render firewall, forward accept", + allocation: firewallAllocation, + wantFilePath: "nftrules_accept_forwarding", + enableDNSProxy: false, + forwardPolicy: ForwardPolicyAccept, + wantErr: nil, + }, + { + name: "render firewall with vpn", + allocation: firewallWithVPNAllocation, + wantFilePath: "nftrules_vpn", + enableDNSProxy: false, + forwardPolicy: ForwardPolicyDrop, + wantErr: nil, + }, + { + name: "render firewall with rules", + allocation: firewallWithRulesAllocation, + wantFilePath: "nftrules_with_rules", + enableDNSProxy: false, + forwardPolicy: ForwardPolicyDrop, + wantErr: nil, + }, + { + name: "render firewall shared", + allocation: firewallSharedAllocation, + wantFilePath: "nftrules_shared", + enableDNSProxy: true, + forwardPolicy: ForwardPolicyDrop, + wantErr: nil, + }, + { + name: "render firewall ipv6", + allocation: firewallIPv6Allocation, + wantFilePath: "nftrules_ipv6", + enableDNSProxy: true, + forwardPolicy: ForwardPolicyDrop, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + _, gotErr := Render(t.Context(), &Config{ + Log: slog.Default(), + fs: fs, + Network: network.New(tt.allocation), + EnableDNSProxy: tt.enableDNSProxy, + ForwardPolicy: tt.forwardPolicy, + Validate: false, + }) + + if diff := cmp.Diff(tt.wantErr, gotErr, 
test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(nftrulesPath) + require.NoError(t, err) + + assert.Equal(t, mustReadExpected(tt.wantFilePath), string(content)) + }) + } +} + +func mustReadExpected(name string) string { + tpl, err := expectedNftableFiles.ReadFile(path.Join("test", name)) + if err != nil { + panic(err) + } + + return string(tpl) +} diff --git a/pkg/network/tpl/nftrules.tpl b/pkg/nftables/nftrules.tpl similarity index 97% rename from pkg/network/tpl/nftrules.tpl rename to pkg/nftables/nftrules.tpl index 96ec1be..c97eea1 100644 --- a/pkg/network/tpl/nftrules.tpl +++ b/pkg/nftables/nftrules.tpl @@ -1,5 +1,4 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.NftablesData*/ -}} -{{ .Comment }} +# {{ .Comment }} table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -26,7 +25,7 @@ table inet metal { iifname "{{ . }}" tcp dport 9100 counter accept comment "node metrics" iifname "{{ . }}" tcp dport 9630 counter accept comment "nftables metrics" {{- end }} - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -41,7 +40,7 @@ table inet metal { {{- range .FirewallRules.Ingress }} {{ . 
}} {{- end }} - {{ if eq .ForwardPolicy "drop" -}} + {{- if eq .ForwardPolicy "drop" }} limit rate 2/minute counter log prefix "nftables-metal-dropped: " {{- end }} } @@ -53,7 +52,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -128,4 +127,4 @@ table inet nat { {{- end }} {{- end }} } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules b/pkg/nftables/test/nftrules similarity index 93% rename from pkg/network/testdata/nftrules rename to pkg/nftables/test/nftrules index 9c9fc40..99033c8 100644 --- a/pkg/network/testdata/nftrules +++ b/pkg/nftables/test/nftrules @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -17,7 +16,7 @@ table inet metal { iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -36,7 +35,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -70,7 +69,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls-nbg-w8101-test)" + oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet)" + oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls)" } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules_accept_forwarding b/pkg/nftables/test/nftrules_accept_forwarding similarity index 92% rename from pkg/network/testdata/nftrules_accept_forwarding rename to pkg/nftables/test/nftrules_accept_forwarding index bdbd8da..9ea7b03 100644 --- a/pkg/network/testdata/nftrules_accept_forwarding +++ b/pkg/nftables/test/nftrules_accept_forwarding @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -17,7 +16,7 @@ table inet metal { iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -26,7 +25,6 @@ table inet metal { ct state invalid counter drop comment "drop invalid packets from forwarding to prevent malicious activity" ct state established,related counter accept comment "stateful forward" tcp dport bgp ct state new counter jump refuse comment "block bgp forward to machines" - } chain output { type filter hook output priority 0; policy accept; @@ -36,7 +34,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -70,7 +68,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls-nbg-w8101-test)" + oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet)" + oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls)" } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules_ipv6 b/pkg/nftables/test/nftrules_ipv6 similarity index 95% rename from pkg/network/testdata/nftrules_ipv6 rename to pkg/nftables/test/nftrules_ipv6 
index 4dc27b7..3d7cb48 100644 --- a/pkg/network/testdata/nftrules_ipv6 +++ b/pkg/nftables/test/nftrules_ipv6 @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -20,7 +19,7 @@ table inet metal { iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -39,7 +38,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -92,7 +91,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip6 saddr 2002::/64 ip6 daddr != 2a02:c00:20::1 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104010" ip6 saddr 2002::/64 counter masquerade random comment "snat (networkid: mpls-nbg-w8101-test)" + oifname "vlan104009" ip6 saddr 2002::/64 ip6 daddr != 2a02:c00:20::1 counter masquerade random comment "snat (networkid: internet)" + oifname "vlan104010" ip6 saddr 2002::/64 counter masquerade random comment "snat (networkid: mpls)" } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules_shared b/pkg/nftables/test/nftrules_shared similarity index 95% rename from pkg/network/testdata/nftrules_shared rename to pkg/nftables/test/nftrules_shared index ff571e6..382c127 100644 --- a/pkg/network/testdata/nftrules_shared 
+++ b/pkg/nftables/test/nftrules_shared @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -18,7 +17,7 @@ table inet metal { tcp dport ssh ct state new counter accept comment "SSH incoming connections" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -37,7 +36,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -77,7 +76,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan3982" ip saddr 10.0.18.0/22 counter masquerade random comment "snat (networkid: storage-net)" - oifname "vlan104009" ip saddr 10.0.18.0/22 ip daddr != 185.1.2.3 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" + oifname "vlan3982" ip saddr 10.0.18.0/22 counter masquerade random comment "snat (networkid: partition-storage)" + oifname "vlan104009" ip saddr 10.0.18.0/22 ip daddr != 185.1.2.3 counter masquerade random comment "snat (networkid: internet)" } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules_vpn b/pkg/nftables/test/nftrules_vpn similarity index 92% rename from pkg/network/testdata/nftrules_vpn rename to pkg/nftables/test/nftrules_vpn index 55c5d06..988e3c3 100644 --- a/pkg/network/testdata/nftrules_vpn +++ b/pkg/nftables/test/nftrules_vpn @@ -1,5 +1,4 @@ -# This file was auto generated 
for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. +# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -17,7 +16,7 @@ table inet metal { iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -36,7 +35,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -70,7 +69,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls-nbg-w8101-test)" + oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet)" + oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls)" } -} \ No newline at end of file +} diff --git a/pkg/network/testdata/nftrules_with_rules b/pkg/nftables/test/nftrules_with_rules similarity index 94% rename from pkg/network/testdata/nftrules_with_rules rename to pkg/nftables/test/nftrules_with_rules index 0ec7073..5941306 100644 --- a/pkg/network/testdata/nftrules_with_rules +++ b/pkg/nftables/test/nftrules_with_rules @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer table inet metal { chain input { type filter hook input priority 0; policy drop; @@ -17,7 +16,7 @@ table inet metal { iifname "vrf3981" tcp dport 9630 counter accept comment "nftables metrics" iifname "vrf3982" tcp dport 9100 counter accept comment "node metrics" iifname "vrf3982" tcp dport 9630 counter accept comment "nftables metrics" - + ct state invalid counter drop comment "drop invalid packets to prevent malicious activity" counter jump refuse } @@ -46,7 +45,7 @@ table inet metal { oifname "lan1" ip6 saddr fe80::/64 tcp dport bgp counter accept comment "bgp unnumbered output at lan1" ip daddr 10.0.0.0/8 udp dport 4789 counter accept comment "outgoing VXLAN" - + ct state established,related counter accept comment "stateful output" ct state invalid counter drop comment "drop invalid packets" } @@ -80,7 +79,7 @@ table inet nat { } chain postrouting { type nat hook postrouting priority 0; policy accept; - oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet-vagrant-lab)" - oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls-nbg-w8101-test)" + oifname "vlan104009" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: internet)" + oifname "vlan104010" ip saddr 10.0.16.0/22 counter masquerade random comment "snat (networkid: mpls)" } -} \ No newline at end of file +} diff --git a/templates/chrony.conf.tpl b/pkg/services/chrony/chrony.conf.tpl similarity index 94% rename from templates/chrony.conf.tpl rename to pkg/services/chrony/chrony.conf.tpl index fb4523a..fc70fe7 100644 --- a/templates/chrony.conf.tpl +++ b/pkg/services/chrony/chrony.conf.tpl @@ -6,8 +6,8 @@ # anycast network of 180+ locations to synchronize time from their closest server. # See https://blog.cloudflare.com/secure-time/ -{{- range .NTPServers}} -pool {{ .Address }} iburst +{{- range .NTPServers }} +pool {{ . 
}} iburst {{- end }} # This directive specify the location of the file containing ID/key pairs for @@ -33,4 +33,4 @@ rtcsync # Step the system clock instead of slewing it if the adjustment is larger than # one second, but only in the first three clock updates. -makestep 1 3 \ No newline at end of file +makestep 1 3 diff --git a/pkg/services/chrony/chrony.go b/pkg/services/chrony/chrony.go new file mode 100644 index 0000000..3cb3801 --- /dev/null +++ b/pkg/services/chrony/chrony.go @@ -0,0 +1,73 @@ +package chrony + +import ( + "context" + "fmt" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + chronyConfigPath = "/etc/chrony/chrony.conf" +) + +var ( + //go:embed chrony.conf.tpl + ChronyConfigTemplateString string +) + +type Config struct { + Log *slog.Logger + Reload bool + Enable bool + // ChronyConfigPath allows overwriting the default chrony config path + ChronyConfigPath string + fs afero.Fs +} + +type TemplateData struct { + NTPServers []string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData, vrfName string) (changed bool, err error) { + serviceName := fmt.Sprintf("chrony@%s.service", vrfName) + + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: ChronyConfigTemplateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + path := chronyConfigPath + if cfg.ChronyConfigPath != "" { + path = cfg.ChronyConfigPath + } + + changed, err = r.Render(ctx, path) + if err != nil { + return changed, err + } + + if cfg.Enable { + if err := systemd_renderer.Enable(ctx, cfg.Log, serviceName); err != nil { + return changed, err + } + } + + if cfg.Reload && changed { + if err := systemd_renderer.Reload(ctx, cfg.Log.With("service-name", "chrony"), serviceName); err != nil { + return changed, err + } + } + + return changed, 
nil +} diff --git a/pkg/services/chrony/chrony_test.go b/pkg/services/chrony/chrony_test.go new file mode 100644 index 0000000..eb92c4c --- /dev/null +++ b/pkg/services/chrony/chrony_test.go @@ -0,0 +1,78 @@ +package chrony + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/default/chrony.conf + expectedDefaultConfig string + //go:embed test/custom/chrony.conf + expectedCustomConfig string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantConfig string + wantChanged bool + wantErr error + }{ + { + name: "render default", + c: &TemplateData{ + NTPServers: []string{"time.cloudflare.com"}, + }, + wantConfig: expectedDefaultConfig, + wantChanged: true, + wantErr: nil, + }, + { + name: "render custom", + c: &TemplateData{ + NTPServers: []string{"1.2.3.4", "1.2.3.5"}, + }, + wantConfig: expectedCustomConfig, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c, "vrf104009") + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(chronyConfigPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantConfig, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/templates/test_data/customntp/chrony.conf b/pkg/services/chrony/test/custom/chrony.conf similarity index 95% rename from templates/test_data/customntp/chrony.conf 
rename to pkg/services/chrony/test/custom/chrony.conf index f8ad3a4..8684b4f 100644 --- a/templates/test_data/customntp/chrony.conf +++ b/pkg/services/chrony/test/custom/chrony.conf @@ -5,7 +5,8 @@ # Cloudflare offers a free public time service that allows us to use their # anycast network of 180+ locations to synchronize time from their closest server. # See https://blog.cloudflare.com/secure-time/ -pool custom.1.ntp.org iburst +pool 1.2.3.4 iburst +pool 1.2.3.5 iburst # This directive specify the location of the file containing ID/key pairs for # NTP authentication. @@ -30,4 +31,4 @@ rtcsync # Step the system clock instead of slewing it if the adjustment is larger than # one second, but only in the first three clock updates. -makestep 1 3 \ No newline at end of file +makestep 1 3 diff --git a/templates/test_data/defaultntp/chrony.conf b/pkg/services/chrony/test/default/chrony.conf similarity index 98% rename from templates/test_data/defaultntp/chrony.conf rename to pkg/services/chrony/test/default/chrony.conf index 30ce9da..a747bea 100644 --- a/templates/test_data/defaultntp/chrony.conf +++ b/pkg/services/chrony/test/default/chrony.conf @@ -30,4 +30,4 @@ rtcsync # Step the system clock instead of slewing it if the adjustment is larger than # one second, but only in the first three clock updates. 
-makestep 1 3 \ No newline at end of file +makestep 1 3 diff --git a/pkg/services/droptailer/droptailer.go b/pkg/services/droptailer/droptailer.go new file mode 100644 index 0000000..2165fff --- /dev/null +++ b/pkg/services/droptailer/droptailer.go @@ -0,0 +1,48 @@ +package droptailer + +import ( + "context" + _ "embed" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + "github.com/spf13/afero" +) + +const ( + serviceName = "droptailer.service" + serviceUnitPath = "/etc/systemd/system/" + serviceName +) + +var ( + //go:embed droptailer.service.tpl + templateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Comment string + TenantVrf string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + Log: cfg.Log, + Enable: cfg.Enable, + ServiceName: serviceName, + TemplateString: templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + return r.Render(ctx, serviceUnitPath, cfg.Reload) +} diff --git a/pkg/network/tpl/droptailer.service.tpl b/pkg/services/droptailer/droptailer.service.tpl similarity index 84% rename from pkg/network/tpl/droptailer.service.tpl rename to pkg/services/droptailer/droptailer.service.tpl index 48e2c8c..c7aa6e5 100644 --- a/pkg/network/tpl/droptailer.service.tpl +++ b/pkg/services/droptailer/droptailer.service.tpl @@ -1,5 +1,4 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.DroptailerData*/ -}} -{{ .Comment }} +# {{ .Comment }} [Unit] Description=Droptailer After=network.target diff --git a/pkg/services/droptailer/droptailer_test.go b/pkg/services/droptailer/droptailer_test.go new file mode 100644 index 0000000..4c05d87 --- /dev/null +++ b/pkg/services/droptailer/droptailer_test.go @@ -0,0 +1,68 @@ +package droptailer + +import ( + "log/slog" + 
"testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/droptailer.service + expectedSystemdUnit string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantService string + wantChanged bool + wantErr error + }{ + { + name: "render", + c: &TemplateData{ + Comment: `generated by os-installer`, + TenantVrf: "vrf3981", + }, + wantService: expectedSystemdUnit, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(serviceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/testdata/droptailer.service b/pkg/services/droptailer/test/droptailer.service similarity index 82% rename from pkg/network/testdata/droptailer.service rename to pkg/services/droptailer/test/droptailer.service index 1c8b53a..ec36135 100644 --- a/pkg/network/testdata/droptailer.service +++ b/pkg/services/droptailer/test/droptailer.service @@ -1,5 +1,4 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . -# Do not edit. 
+# generated by os-installer [Unit] Description=Droptailer After=network.target diff --git a/pkg/services/firewall-controller/firewall-controller.go b/pkg/services/firewall-controller/firewall-controller.go new file mode 100644 index 0000000..361820b --- /dev/null +++ b/pkg/services/firewall-controller/firewall-controller.go @@ -0,0 +1,49 @@ +package firewallcontroller + +import ( + "context" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + serviceName = "firewall-controller.service" + serviceUnitPath = "/etc/systemd/system/" + serviceName +) + +var ( + //go:embed firewall_controller.service.tpl + templateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Comment string + DefaultRouteVrf string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + Log: cfg.Log, + Enable: cfg.Enable, + ServiceName: serviceName, + TemplateString: templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + return r.Render(ctx, serviceUnitPath, cfg.Reload) +} diff --git a/pkg/services/firewall-controller/firewall-controller_test.go b/pkg/services/firewall-controller/firewall-controller_test.go new file mode 100644 index 0000000..2e6f380 --- /dev/null +++ b/pkg/services/firewall-controller/firewall-controller_test.go @@ -0,0 +1,67 @@ +package firewallcontroller + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/firewall-controller.service + expectedSystemdUnit string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { 
+ name string + c *TemplateData + wantService string + wantChanged bool + wantErr error + }{ + { + name: "render", + c: &TemplateData{ + Comment: `Do not edit.`, + DefaultRouteVrf: "vrf104009", + }, + wantService: expectedSystemdUnit, + wantChanged: true, + wantErr: nil, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(serviceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/tpl/firewall_controller.service.tpl b/pkg/services/firewall-controller/firewall_controller.service.tpl similarity index 76% rename from pkg/network/tpl/firewall_controller.service.tpl rename to pkg/services/firewall-controller/firewall_controller.service.tpl index 8ec206c..c356779 100644 --- a/pkg/network/tpl/firewall_controller.service.tpl +++ b/pkg/services/firewall-controller/firewall_controller.service.tpl @@ -1,5 +1,4 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.FirewallControllerData*/ -}} -{{ .Comment }} +# {{ .Comment }} [Unit] Description=Firewall controller - configures the firewall based on k8s resources After=network.target diff --git a/pkg/network/testdata/firewall-controller.service b/pkg/services/firewall-controller/test/firewall-controller.service similarity index 78% rename from pkg/network/testdata/firewall-controller.service rename to pkg/services/firewall-controller/test/firewall-controller.service index 8eb2430..e9491f6 100644 --- 
a/pkg/network/testdata/firewall-controller.service +++ b/pkg/services/firewall-controller/test/firewall-controller.service @@ -1,4 +1,3 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . # Do not edit. [Unit] Description=Firewall controller - configures the firewall based on k8s resources diff --git a/pkg/services/install.go b/pkg/services/install.go new file mode 100644 index 0000000..7e099e7 --- /dev/null +++ b/pkg/services/install.go @@ -0,0 +1,134 @@ +package services + +import ( + "context" + "errors" + "log/slog" + "strings" + + "github.com/metal-stack/os-installer/pkg/network" + "github.com/metal-stack/os-installer/pkg/services/chrony" + "github.com/metal-stack/os-installer/pkg/services/droptailer" + firewallcontroller "github.com/metal-stack/os-installer/pkg/services/firewall-controller" + nftablesexporter "github.com/metal-stack/os-installer/pkg/services/nftables-exporter" + nodeexporter "github.com/metal-stack/os-installer/pkg/services/node-exporter" + "github.com/metal-stack/os-installer/pkg/services/suricata" + "github.com/metal-stack/os-installer/pkg/services/tailscale" +) + +func WriteSystemdServices(ctx context.Context, log *slog.Logger, network *network.Network, machineUUID string) error { + if network.IsMachine() { + return nil + } + + var ( + errs []error + defaultRouteVRF string + tenantVRF string + ) + + defaultRouteVRF, err := network.GetDefaultRouteNetworkVrfName() + if err != nil { + errs = append(errs, err) + } + tenantVRF, err = network.GetTenantNetworkVrfName() + if err != nil { + errs = append(errs, err) + } + + // Droptailer + if _, err = droptailer.WriteSystemdUnit(ctx, &droptailer.Config{ + Log: log, + Enable: true, + Reload: false, + }, &droptailer.TemplateData{ + Comment: "created from os-installer", + TenantVrf: tenantVRF, + }); err != nil { + errs = append(errs, err) + } + + // Chrony + if _, err = chrony.WriteSystemdUnit(ctx, &chrony.Config{ + Log: log, + Enable: true, + Reload: 
false, + ChronyConfigPath: "", + }, &chrony.TemplateData{ + NTPServers: network.NTPServers(), + }, defaultRouteVRF); err != nil { + errs = append(errs, err) + } + + // firewall-controller + if _, err = firewallcontroller.WriteSystemdUnit(ctx, &firewallcontroller.Config{ + Log: log, + Enable: true, + Reload: false, + }, &firewallcontroller.TemplateData{ + Comment: "created from os-installer", + DefaultRouteVrf: defaultRouteVRF, + }); err != nil { + errs = append(errs, err) + } + + // nftables-exporter + if _, err := nftablesexporter.WriteSystemdUnit(ctx, &nftablesexporter.Config{ + Log: log, + Enable: true, + Reload: false, + }, &nftablesexporter.TemplateData{ + Comment: "created from os-installer", + }); err != nil { + errs = append(errs, err) + } + + // node-exporter + if _, err := nodeexporter.WriteSystemdUnit(ctx, &nodeexporter.Config{ + Log: log, + Enable: true, + Reload: false, + }, &nodeexporter.TemplateData{ + Comment: "created from os-installer", + }); err != nil { + errs = append(errs, err) + } + + // suricata + // + // TODO: this listens only on one internet facing interface, but should listening on all external interfaces. + suricataInterface := strings.ReplaceAll(defaultRouteVRF, "vrf", "vlan") + if _, err := suricata.WriteSystemdUnit(ctx, &suricata.Config{ + Log: log, + Enable: true, + Reload: false, + }, &suricata.TemplateData{ + Interface: suricataInterface, + DefaultRouteVrf: defaultRouteVRF, + }); err != nil { + errs = append(errs, err) + } + + // tailscale + if network.HasVpn() { + vpn := network.Vpn() + if _, err := tailscale.WriteSystemdUnit(ctx, &tailscale.Config{ + Log: log, + Enable: true, + Reload: false, + }, &tailscale.TemplateData{ + Comment: "created from os-installer", + DefaultRouteVrf: defaultRouteVRF, + MachineID: machineUUID, + AuthKey: vpn.AuthKey, + Address: vpn.ControlPlaneAddress, + }); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + return nil +} diff --git a/pkg/services/nftables-exporter/nftables-exporter.go b/pkg/services/nftables-exporter/nftables-exporter.go new file mode 100644 index 0000000..c5dc984 --- /dev/null +++ b/pkg/services/nftables-exporter/nftables-exporter.go @@ -0,0 +1,47 @@ +package nftablesexporter + +import ( + "context" + _ "embed" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + "github.com/spf13/afero" +) + +const ( + serviceName = "nftables-exporter.service" + serviceUnitPath = "/etc/systemd/system/" + serviceName +) + +var ( + //go:embed nftables_exporter.service.tpl + templateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Comment string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + Log: cfg.Log, + Enable: cfg.Enable, + ServiceName: serviceName, + TemplateString: templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + return r.Render(ctx, serviceUnitPath, cfg.Reload) +} diff --git a/pkg/services/nftables-exporter/nftables-exporter_test.go b/pkg/services/nftables-exporter/nftables-exporter_test.go new file mode 100644 index 0000000..e26f809 --- /dev/null +++ b/pkg/services/nftables-exporter/nftables-exporter_test.go @@ -0,0 +1,67 @@ +package nftablesexporter + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/nftables-exporter.service + expectedSystemdUnit string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantService string + wantChanged bool + wantErr error + }{ + { + name: "render", + c: 
&TemplateData{ + Comment: `Do not edit.`, + }, + wantService: expectedSystemdUnit, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(serviceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/tpl/nftables_exporter.service.tpl b/pkg/services/nftables-exporter/nftables_exporter.service.tpl similarity index 70% rename from pkg/network/tpl/nftables_exporter.service.tpl rename to pkg/services/nftables-exporter/nftables_exporter.service.tpl index 2381523..d11bbbb 100644 --- a/pkg/network/tpl/nftables_exporter.service.tpl +++ b/pkg/services/nftables-exporter/nftables_exporter.service.tpl @@ -1,5 +1,4 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.NftablesExporterData*/ -}} -{{ .Comment }} +# {{ .Comment }} [Unit] Description=Nftables exporter - provides prometheus metrics for nftables After=network.target diff --git a/pkg/network/testdata/nftables-exporter.service b/pkg/services/nftables-exporter/test/nftables-exporter.service similarity index 72% rename from pkg/network/testdata/nftables-exporter.service rename to pkg/services/nftables-exporter/test/nftables-exporter.service index 4a2d3b9..e855f07 100644 --- a/pkg/network/testdata/nftables-exporter.service +++ b/pkg/services/nftables-exporter/test/nftables-exporter.service @@ -1,4 +1,3 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app 
version . # Do not edit. [Unit] Description=Nftables exporter - provides prometheus metrics for nftables diff --git a/pkg/services/node-exporter/node-exporter.go b/pkg/services/node-exporter/node-exporter.go new file mode 100644 index 0000000..04c9db2 --- /dev/null +++ b/pkg/services/node-exporter/node-exporter.go @@ -0,0 +1,47 @@ +package nodeexporter + +import ( + "context" + _ "embed" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + "github.com/spf13/afero" +) + +const ( + serviceName = "node-exporter.service" + serviceUnitPath = "/etc/systemd/system/" + serviceName +) + +var ( + //go:embed node_exporter.service.tpl + templateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Comment string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + Log: cfg.Log, + Enable: cfg.Enable, + ServiceName: serviceName, + TemplateString: templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + return r.Render(ctx, serviceUnitPath, cfg.Reload) +} diff --git a/pkg/services/node-exporter/node-exporter_test.go b/pkg/services/node-exporter/node-exporter_test.go new file mode 100644 index 0000000..c3e652c --- /dev/null +++ b/pkg/services/node-exporter/node-exporter_test.go @@ -0,0 +1,67 @@ +package nodeexporter + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/node-exporter.service + expectedSystemdUnit string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantService string + wantChanged bool + wantErr error + }{ + { + name: 
"render", + c: &TemplateData{ + Comment: `Do not edit.`, + }, + wantService: expectedSystemdUnit, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(serviceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/tpl/node_exporter.service.tpl b/pkg/services/node-exporter/node_exporter.service.tpl similarity index 70% rename from pkg/network/tpl/node_exporter.service.tpl rename to pkg/services/node-exporter/node_exporter.service.tpl index 3c5550e..48d9ad2 100644 --- a/pkg/network/tpl/node_exporter.service.tpl +++ b/pkg/services/node-exporter/node_exporter.service.tpl @@ -1,5 +1,4 @@ -{{- /*gotype: github.com/metal-stack/os-installer/pkg/network.NodeExporterData*/ -}} -{{ .Comment }} +# {{ .Comment }} [Unit] Description=Node exporter - provides prometheus metrics about the node After=network.target diff --git a/pkg/network/testdata/node-exporter.service b/pkg/services/node-exporter/test/node-exporter.service similarity index 71% rename from pkg/network/testdata/node-exporter.service rename to pkg/services/node-exporter/test/node-exporter.service index cd38f40..a9cb459 100644 --- a/pkg/network/testdata/node-exporter.service +++ b/pkg/services/node-exporter/test/node-exporter.service @@ -1,4 +1,3 @@ -# This file was auto generated for machine: 'e0ab02d2-27cd-5a5e-8efc-080ba80cf258' by app version . # Do not edit. 
[Unit] Description=Node exporter - provides prometheus metrics about the node diff --git a/pkg/services/suricata/suricata.go b/pkg/services/suricata/suricata.go new file mode 100644 index 0000000..6da07ab --- /dev/null +++ b/pkg/services/suricata/suricata.go @@ -0,0 +1,103 @@ +package suricata + +import ( + "context" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + suricataServiceName = "suricata.service" + + suricataUpdateServiceName = "suricata_update.service" + suricataUpdateServiceUnitPath = "/etc/systemd/system/" + suricataUpdateServiceName + + suricataDefaultsPath = "/etc/default/suricata" + suricataConfigPath = "/etc/suricata/suricata.yaml" +) + +var ( + //go:embed suricata.yaml.tpl + suricataConfigTemplateString string + //go:embed suricata_defaults.tpl + suricataDefaultsTemplateString string + //go:embed suricata_update.service.tpl + suricataUpdateServiceTemplateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Interface string + DefaultRouteVrf string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + ServiceName: suricataUpdateServiceName, + Log: cfg.Log, + TemplateString: suricataUpdateServiceTemplateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + chg, err := r.Render(ctx, suricataUpdateServiceUnitPath, cfg.Reload) + if err != nil { + return chg, err + } + + // return changed if one has changed + changed = changed || chg + + for _, spec := range []struct { + path string + templateString string + }{ + { + path: suricataDefaultsPath, + templateString: suricataDefaultsTemplateString, + }, + { + path: suricataConfigPath, + templateString: 
suricataConfigTemplateString, + }, + } { + r, err := renderer.New(&renderer.Config{ + Log: cfg.Log, + TemplateString: spec.templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + chg, err := r.Render(ctx, spec.path) + if err != nil { + return chg, err + } + + changed = changed || chg + } + + if cfg.Reload && changed { + if err := systemd_renderer.Reload(ctx, cfg.Log.With("service-name", "suricata"), suricataServiceName); err != nil { + return changed, err + } + } + + return changed, nil +} diff --git a/pkg/network/tpl/suricata_config.yaml.tpl b/pkg/services/suricata/suricata.yaml.tpl similarity index 99% rename from pkg/network/tpl/suricata_config.yaml.tpl rename to pkg/services/suricata/suricata.yaml.tpl index 378b618..0d3ab7f 100644 --- a/pkg/network/tpl/suricata_config.yaml.tpl +++ b/pkg/services/suricata/suricata.yaml.tpl @@ -1021,8 +1021,8 @@ coredump: # This feature is currently only used by the reject* keywords. host-mode: auto -# Number of packets preallocated per thread. The default is 1024. A higher number -# will make sure each CPU will be more easily kept busy, but may negatively +# Number of packets preallocated per thread. The default is 1024. A higher number +# will make sure each CPU will be more easily kept busy, but may negatively # impact caching. #max-pending-packets: 1024 @@ -1057,7 +1057,7 @@ unix-command: # Magic file. The extension .mgc is added to the value here. #magic-file: /usr/share/file/magic -#magic-file: +#magic-file: # GeoIP2 database file. Specify path and filename of GeoIP2 database # if using rules with "geoip" rule option. 
diff --git a/pkg/network/tpl/suricata_defaults.tpl b/pkg/services/suricata/suricata_defaults.tpl similarity index 100% rename from pkg/network/tpl/suricata_defaults.tpl rename to pkg/services/suricata/suricata_defaults.tpl diff --git a/pkg/services/suricata/suricata_test.go b/pkg/services/suricata/suricata_test.go new file mode 100644 index 0000000..ca476ba --- /dev/null +++ b/pkg/services/suricata/suricata_test.go @@ -0,0 +1,90 @@ +package suricata + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/suricata-update.service + expectedSuricataSystemdUnit string + //go:embed test/suricata.yaml + expectedSuricataConfig string + //go:embed test/suricata_defaults + expectedSuricataDefaults string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantSuricataService string + wantSuricataConfig string + wantSuricataDefaults string + wantChanged bool + wantErr error + }{ + { + name: "render", + c: &TemplateData{ + DefaultRouteVrf: "vrf104009", + Interface: "vlan104009", + }, + wantSuricataService: expectedSuricataSystemdUnit, + wantSuricataConfig: expectedSuricataConfig, + wantSuricataDefaults: expectedSuricataDefaults, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(suricataUpdateServiceUnitPath) + require.NoError(t, 
err) + + if diff := cmp.Diff(tt.wantSuricataService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + + content, err = fs.ReadFile(suricataConfigPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantSuricataConfig, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + + content, err = fs.ReadFile(suricataDefaultsPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantSuricataDefaults, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/tpl/suricata_update.service.tpl b/pkg/services/suricata/suricata_update.service.tpl similarity index 100% rename from pkg/network/tpl/suricata_update.service.tpl rename to pkg/services/suricata/suricata_update.service.tpl diff --git a/pkg/network/testdata/suricata-update.service b/pkg/services/suricata/test/suricata-update.service similarity index 100% rename from pkg/network/testdata/suricata-update.service rename to pkg/services/suricata/test/suricata-update.service diff --git a/pkg/services/suricata/test/suricata.yaml b/pkg/services/suricata/test/suricata.yaml new file mode 100644 index 0000000..b500946 --- /dev/null +++ b/pkg/services/suricata/test/suricata.yaml @@ -0,0 +1,1836 @@ +%YAML 1.1 +--- + +# Suricata configuration file located in /etc/suricata +# In addition to the comments describing all +# options in this file, full documentation can be found at: +# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html + +## +## Step 1: inform Suricata about your network +## + +vars: + # more specific is better for alert accuracy and performance + address-groups: + HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]" + #HOME_NET: "[192.168.0.0/16]" + #HOME_NET: "[10.0.0.0/8]" + #HOME_NET: "[172.16.0.0/12]" + #HOME_NET: "any" + + EXTERNAL_NET: "!$HOME_NET" + #EXTERNAL_NET: "any" + + HTTP_SERVERS: "$HOME_NET" + SMTP_SERVERS: "$HOME_NET" + SQL_SERVERS: "$HOME_NET" + DNS_SERVERS: 
"$HOME_NET" + TELNET_SERVERS: "$HOME_NET" + AIM_SERVERS: "$EXTERNAL_NET" + DC_SERVERS: "$HOME_NET" + DNP3_SERVER: "$HOME_NET" + DNP3_CLIENT: "$HOME_NET" + MODBUS_CLIENT: "$HOME_NET" + MODBUS_SERVER: "$HOME_NET" + ENIP_CLIENT: "$HOME_NET" + ENIP_SERVER: "$HOME_NET" + + port-groups: + HTTP_PORTS: "80" + SHELLCODE_PORTS: "!80" + ORACLE_PORTS: 1521 + SSH_PORTS: 22 + DNP3_PORTS: 20000 + MODBUS_PORTS: 502 + FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" + FTP_PORTS: 21 + VXLAN_PORTS: 4789 + +## +## Step 2: select outputs to enable +## + +# The default logging directory. Any log or output file will be +# placed here if its not specified with a full path name. This can be +# overridden with the -l command line parameter. +default-log-dir: /var/log/suricata/ + +# global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls at what interval + # the loggers are invoked. + interval: 8 + # Add decode events as stats. + #decoder-events: true + # Decoder event prefix in stats. Has been 'decoder' before, but that leads + # to missing events in the eve.stats records. See issue #2225. + #decoder-events-prefix: "decoder.event" + # Add stream events as stats. + #stream-events: false + +# Configure the type of alert (and other) logging you would like. 
+outputs: + # a line based alerts log similar to Snort's fast.log + - fast: + enabled: yes + filename: fast.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Extensible Event Format (nicknamed EVE) event log in JSON format + - eve-log: + enabled: yes + filetype: regular + filename: eve.json + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish + # ## lpush and rpush are using a Redis list. "list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # key: suricata ## key or channel to use (default to suricata) + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting as to be reserved to high traffic suricata. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entry to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # a records a predictable flow id that can be used to match records to + # output of other tools such as Bro. + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: false + # Seed value for the ID output. 
Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # http-body: yes # Requires metadata; enable dumping of http body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. 
Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is disabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - http: + extended: yes # enable this for extended logging information + # custom allows additional http fields to be included in eve-log + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one among {both, request, response} + # to dump all http headers for every http request and/or response + # dump-all-headers: none + - dns: + # This configuration uses the new DNS logging format, + # the old configuration is still available: + # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format + + # As of Suricata 5.0, version 2 of the eve dns output + # format is the default. + #version: 2 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. 
+ #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # Types to log, based on the query type. + # Default: all. + #types: [a, aaaa, cname, mx, ns, ptr, txt] + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom allows to control which tls fields that are included + # in eve-log + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + - ftp + #- rdp + - nfs + - smb + - tftp + - ikev2 + - krb5 + - snmp + #- sip + - dhcp: + enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. 
+ extended: no + - ssh + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # deprecated - unified2 alert format for use with Barnyard2 + - unified2-alert: + enabled: no + # for further options see: + # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert + + # a line based log of HTTP requests (no alerts) + - http-log: + enabled: no + filename: http.log + append: yes + #extended: yes # enable this for extended logging information + #custom: yes # enabled the custom logging format (defined by customformat) + #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # a line based log of TLS handshake parameters (no alerts) + - tls-log: + enabled: no # Log TLS connections. + filename: tls.log # File to store TLS logs. + append: yes + #extended: yes # Log extended information like fingerprint + #custom: yes # enabled the custom logging format (defined by customformat) + #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D" + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + + # output module to store certificates chain to disk + - tls-store: + enabled: no + #certs-log-dir: certs # directory to store the certificates files + + # Packet log... log packets in pcap format. 3 modes of operation: "normal" + # "multi" and "sguil". + # + # In normal mode a pcap file "filename" is created in the default-log-dir, + # or are as specified by "dir". 
+ # In multi mode, a file is created per thread. This will perform much + # better, but will create multiple files where 'normal' would create one. + # In multi mode the filename takes a few special variables: + # - %n -- thread number + # - %i -- thread id + # - %t -- timestamp (secs or secs.usecs based on 'ts-format' + # E.g. filename: pcap.%n.%t + # + # Note that it's possible to use directories, but the directories are not + # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the + # per thread directory. + # + # Also note that the limit and max-files settings are enforced per thread. + # So the size limit when using 8 threads with 1000mb files and 2000 files + # is: 8*1000*2000 ~ 16TiB. + # + # In Sguil mode "dir" indicates the base directory. In this base dir the + # pcaps are created in th directory structure Sguil expects: + # + # $sguil-base-dir/YYYY-MM-DD/$filename. + # + # By default all packets are logged except: + # - TCP streams beyond stream.reassembly.depth + # - encrypted streams after the key exchange + # + - pcap-log: + enabled: no + filename: log.pcap + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + limit: 1000mb + + # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" + max-files: 2000 + + # Compression algorithm for pcap files. Possible values: none, lz4. + # Enabling compression is incompatible with the sguil mode. Note also + # that on Windows, enabling compression will *increase* disk I/O. + compression: none + + # Further options for lz4 compression. The compression level can be set + # to a value between 0 and 16, where higher values result in higher + # compression. + #lz4-checksum: no + #lz4-level: 0 + + mode: normal # normal, multi or sguil. + + # Directory to place pcap files. If not provided the default log + # directory will be used. Required for "sguil" mode. 
+ #dir: /nsm_data/ + + #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec + use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets + honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. + + # a full alerts log containing much information for signature writers + # or for investigating suspected false positives. + - alert-debug: + enabled: no + filename: alert-debug.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # alert output to prelude (https://www.prelude-siem.org/) only + # available if Suricata has been compiled with --enable-prelude + - alert-prelude: + enabled: no + profile: suricata + log-packet-content: no + log-packet-header: yes + + # Stats.log contains data from various counters of the Suricata engine. + - stats: + enabled: yes + filename: stats.log + append: no # append to file (yes) or overwrite it (no) + totals: yes # stats for all threads merged together + threads: no # per thread stats + null-values: yes # print counters that have value 0 + + # a line based alerts log similar to fast.log into syslog + - syslog: + enabled: no + # reported identity to syslog. If ommited the program name (usually + # suricata) will be used. + #identity: "suricata" + facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + + # deprecated a line based information for dropped packets in IPS mode + - drop: + enabled: no + # further options documented at: + # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets + + # Output module for storing files on disk. Files are stored in a + # directory names consisting of the first 2 characters of the + # SHA256 of the file. Each file is given its SHA256 as a filename. 
+ # + # When a duplicate file is found, the existing file is touched to + # have its timestamps updated. + # + # Unlike the older filestore, metadata is not written out by default + # as each file should already have a "fileinfo" record in the + # eve.log. If write-fileinfo is set to yes, the each file will have + # one more associated .json files that consists of the fileinfo + # record. A fileinfo file will be written for each occurrence of the + # file seen using a filename suffix to ensure uniqueness. + # + # To prune the filestore directory see the "suricatactl filestore + # prune" command which can delete files over a certain age. + - file-store: + version: 2 + enabled: no + + # Set the directory for the filestore. If the path is not + # absolute will be be relative to the default-log-dir. + #dir: filestore + + # Write out a fileinfo record for each occurrence of a + # file. Disabled by default as each occurrence is already logged + # as a fileinfo record to the main eve-log. + #write-fileinfo: yes + + # Force storing of all files. Default: no. + #force-filestore: yes + + # Override the global stream-depth for sessions in which we want + # to perform file extraction. Set to 0 for unlimited. + #stream-depth: 0 + + # Uncomment the following variable to define how many files can + # remain open for filestore by Suricata. Default value is 0 which + # means files get closed after each write + #max-open-files: 1000 + + # Force logging of checksums, available hash functions are md5, + # sha1 and sha256. Note that SHA256 is automatically forced by + # the use of this output module as it uses the SHA256 as the + # file naming scheme. + #force-hash: [sha1, md5] + # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. 
This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + # deprecated - file-store v1 + - file-store: + enabled: no + # further options documented at: + # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1 + + # Log TCP data after stream normalization + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per TCP session and stores the raw TCP data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by stream.reassembly.depth + - tcp-data: + enabled: no + type: file + filename: tcp-data.log + + # Log HTTP body data after normalization, dechunking and unzipping. + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per HTTP session and stores the normalized data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by the body limit settings + - http-body-data: + enabled: no + type: file + filename: http-data.log + + # Lua Output Support - execute lua script to generate alert and event + # output. + # Documented at: + # https://suricata.readthedocs.io/en/latest/output/lua-output.html + - lua: + enabled: no + #scripts-dir: /etc/suricata/lua-output/ + scripts: + # - script1.lua + +# Logging configuration. This is not about logging IDS alerts/events, but +# output about what Suricata is doing, like startup messages, errors, etc. 
+logging: + # The default log level, can be overridden in an output section. + # Note that debug level logging will only be emitted if Suricata was + # compiled with the --enable-debug configure option. + # + # This value is overridden by the SC_LOG_LEVEL env var. + default-log-level: notice + + # The default output format. Optional parameter, should default to + # something reasonable if not provided. Can be overridden in an + # output section. You can leave this out to get the default. + # + # This value is overridden by the SC_LOG_FORMAT env var. + #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " + + # A regex to filter output. Can be overridden in an output section. + # Defaults to empty (no filter). + # + # This value is overridden by the SC_LOG_OP_FILTER env var. + default-output-filter: + + # Define your logging outputs. If none are defined, or they are all + # disabled you will get the default - console output. + outputs: + - console: + enabled: yes + # type: json + - file: + enabled: yes + level: info + filename: suricata.log + # type: json + - syslog: + enabled: no + facility: local5 + format: "[%i] <%d> -- " + # type: json + + +## +## Step 4: configure common capture settings +## +## See "Advanced Capture Options" below for more options, including NETMAP +## and PF_RING. +## + +# Linux high speed capture support +af-packet: + - interface: vlan104009 + # Number of receive threads. "auto" uses the number of cores + #threads: auto + # Default clusterid. AF_PACKET will load balance packets based on flow. + cluster-id: 99 + # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. + # This is only supported for Linux kernel > 3.1 + # possible value are: + # * cluster_flow: all packets of a given flow are send to the same socket + # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket + # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same + # socket. 
Requires at least Linux 3.14. + # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for + # more info. + # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system + # with capture card using RSS (require cpu affinity tuning and system irq tuning) + cluster-type: cluster_flow + # In some fragmentation case, the hash can not be computed. If "defrag" is set + # to yes, the kernel will do the needed defragmentation before sending the packets. + defrag: yes + # To use the ring feature of AF_PACKET, set 'use-mmap' to yes + #use-mmap: yes + # Lock memory map to avoid it goes to swap. Be careful that over subscribing could lock + # your system + #mmap-locked: yes + # Use tpacket_v3 capture mode, only active if use-mmap is true + # Don't use it in IPS or TAP mode as it causes severe latency + #tpacket-v3: yes + # Ring size will be computed with respect to max_pending_packets and number + # of threads. You can set manually the ring size in number of packets by setting + # the following value. If you are using flow cluster-type and have really network + # intensive single-flow you could want to set the ring-size independently of the number + # of threads: + #ring-size: 2048 + # Block size is used by tpacket_v3 only. It should set to a value high enough to contain + # a decent number of packets. Size is in bytes so please consider your MTU. It should be + # a power of 2 and it must be multiple of page size (usually 4096). + #block-size: 32768 + # tpacket_v3 block timeout: an open block is passed to userspace if it is not + # filled after block-timeout milliseconds. + #block-timeout: 10 + # On busy system, this could help to set it to yes to recover from a packet drop + # phase. This will result in some packets (at max a ring flush) being non treated. 
+ #use-emergency-flush: yes + # recv buffer size, increase value could improve performance + # buffer-size: 32768 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - kernel: use indication sent by kernel for each packet (default) + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: kernel + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + # You can use the following variables to activate AF_PACKET tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + #copy-mode: ips + #copy-iface: eth1 + # For eBPF and XDP setup including bypass, filter and load balancing, please + # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info. + + # Put default values here. These will be used for an interface that is not + # in the list above. + - interface: default + #threads: auto + #use-mmap: no + #tpacket-v3: yes + +# Cross platform libpcap capture support +pcap: + - interface: eth0 + # On Linux, pcap will try to use mmaped capture and will use buffer-size + # as total of memory used by the ring. So set this to something bigger + # than 1% of your bandwidth. + #buffer-size: 16777216 + #bpf-filter: "tcp and port 25" + # Choose checksum verification mode for the interface. 
At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: Suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # With some accelerator cards using a modified libpcap (like myricom), you + # may want to have the same number of capture threads as the number of capture + # rings. In this case, set up the threads variable to N to start N threads + # listening on the same interface. + #threads: 16 + # set to no to disable promiscuous mode: + #promisc: no + # set snaplen, if not set it defaults to MTU if MTU can be known + # via ioctl call and to full capture if not. + #snaplen: 1518 + # Put default values here + - interface: default + #checksum-checks: auto + +# Settings for reading pcap files +pcap-file: + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: Suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have checksum tested + checksum-checks: auto + +# See "Advanced Capture Options" below for more options, including NETMAP +# and PF_RING. + + +## +## Step 5: App Layer Protocol Configuration +## + +# Configure the app-layer parsers. The protocols section details each +# protocol. +# +# The option "enabled" takes 3 values - "yes", "no", "detection-only". +# "yes" enables both detection and the parser, "no" disables both, and +# "detection-only" enables protocol detection only (parser disabled). 
+app-layer: + protocols: + krb5: + enabled: yes + snmp: + enabled: yes + ikev2: + enabled: yes + tls: + enabled: yes + detection-ports: + dp: 443 + + # Generate JA3 fingerprint from client hello. If not specified it + # will be disabled by default, but enabled if rules require it. + #ja3-fingerprints: auto + + # What to do when the encrypted communications start: + # - default: keep tracking TLS session, check for protocol anomalies, + # inspect tls_* keywords. Disables inspection of unmodified + # 'content' signatures. + # - bypass: stop processing this flow as much as possible. No further + # TLS parsing and inspection. Offload flow bypass to kernel + # or hardware if possible. + # - full: keep tracking and inspection as normal. Unmodified content + # keyword signatures are inspected as well. + # + # For best performance, select 'bypass'. + # + #encryption-handling: default + + dcerpc: + enabled: yes + ftp: + enabled: yes + # memcap: 64mb + # RDP, disabled by default. + rdp: + #enabled: no + ssh: + enabled: yes + smtp: + enabled: yes + raw-extraction: no + # Configure SMTP-MIME Decoder + mime: + # Decode MIME messages from SMTP transactions + # (may be resource intensive) + # This field supercedes all others because it turns the entire + # process on or off + decode-mime: yes + + # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) + decode-base64: yes + decode-quoted-printable: yes + + # Maximum bytes per header data value stored in the data structure + # (default is 2000) + header-value-depth: 2000 + + # Extract URLs and save in state data structure + extract-urls: yes + # Set to yes to compute the md5 of the mail body. You will then + # be able to journalize it. 
+ body-md5: no + # Configure inspected-tracker for file_data keyword + inspected-tracker: + content-limit: 100000 + content-inspect-min-size: 32768 + content-inspect-window: 4096 + imap: + enabled: detection-only + smb: + enabled: yes + detection-ports: + dp: 139, 445 + + # Stream reassembly size for SMB streams. By default track it completely. + #stream-depth: 0 + + nfs: + enabled: yes + tftp: + enabled: yes + dns: + # memcaps. Globally and per flow/state. + #global-memcap: 16mb + #state-memcap: 512kb + + # How many unreplied DNS requests are considered a flood. + # If the limit is reached, app-layer-event:dns.flooded; will match. + #request-flood: 500 + + tcp: + enabled: yes + detection-ports: + dp: 53 + udp: + enabled: yes + detection-ports: + dp: 53 + http: + enabled: yes + # memcap: Maximum memory capacity for http + # Default is unlimited, value can be such as 64mb + + # default-config: Used when no server-config matches + # personality: List of personalities used by default + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. + # + # For advanced options, see the user guide + + + # server-config: List of server configurations to use if address matches + # address: List of IP addresses or networks for this block + # personalitiy: List of personalities used by this block + # + # Then, all the fields from default-config can be overloaded + # + # Currently Available Personalities: + # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, + # IIS_7_0, IIS_7_5, Apache_2 + libhtp: + default-config: + personality: IDS + + # Can be specified in kb, mb, gb. Just a number indicates + # it's in bytes. 
+ request-body-limit: 100kb + response-body-limit: 100kb + + # inspection limits + request-body-minimal-inspect-size: 32kb + request-body-inspect-window: 4kb + response-body-minimal-inspect-size: 40kb + response-body-inspect-window: 16kb + + # response body decompression (0 disables) + response-body-decompress-layer-limit: 2 + + # auto will use http-body-inline mode in IPS mode, yes or no set it statically + http-body-inline: auto + + # Decompress SWF files. + # 2 types: 'deflate', 'lzma', 'both' will decompress deflate and lzma + # compress-depth: + # Specifies the maximum amount of data to decompress, + # set 0 for unlimited. + # decompress-depth: + # Specifies the maximum amount of decompressed data to obtain, + # set 0 for unlimited. + swf-decompression: + enabled: yes + type: both + compress-depth: 0 + decompress-depth: 0 + + # Take a random value for inspection sizes around the specified value. + # This lower the risk of some evasion technics but could lead + # detection change between runs. It is set to 'yes' by default. + #randomize-inspection-sizes: yes + # If randomize-inspection-sizes is active, the value of various + # inspection size will be choosen in the [1 - range%, 1 + range%] + # range + # Default value of randomize-inspection-range is 10. + #randomize-inspection-range: 10 + + # decoding + double-decode-path: no + double-decode-query: no + + # Can disable LZMA decompression + #lzma-enabled: yes + # Memory limit usage for LZMA decompression dictionary + # Data is decompressed until dictionary reaches this size + #lzma-memlimit: 1mb + # Maximum decompressed size with a compression ratio + # above 2048 (only LZMA can reach this ratio, deflate cannot) + #compression-bomb-limit: 1mb + + server-config: + + #- apache: + # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] + # personality: Apache_2 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. 
+ # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + #- iis7: + # address: + # - 192.168.0.0/24 + # - 192.168.10.0/24 + # personality: IIS_7_0 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + # Note: Modbus probe parser is minimalist due to the poor significant field + # Only Modbus message length (greater than Modbus header length) + # And Protocol ID (equal to 0) are checked in probing parser + # It is important to enable detection port and define Modbus port + # to avoid false positive + modbus: + # How many unreplied Modbus requests are considered a flood. + # If the limit is reached, app-layer-event:modbus.flooded; will match. + #request-flood: 500 + + enabled: no + detection-ports: + dp: 502 + # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it + # is recommended to keep the TCP connection opened with a remote device + # and not to open and close it for each MODBUS/TCP transaction. In that + # case, it is important to set the depth of the stream reassembling as + # unlimited (stream.reassembly.depth: 0) + + # Stream reassembly size for modbus. By default track it completely. + stream-depth: 0 + + # DNP3 + dnp3: + enabled: no + detection-ports: + dp: 20000 + + # SCADA EtherNet/IP and CIP protocol support + enip: + enabled: no + detection-ports: + dp: 44818 + sp: 44818 + + ntp: + enabled: yes + + dhcp: + enabled: yes + + # SIP, disabled by default. + sip: + #enabled: no + +# Limit for the maximum number of asn1 frames to decode (default 256) +asn1-max-frames: 256 + + +############################################################################## +## +## Advanced settings below +## +############################################################################## + +## +## Run Options +## + +# Run suricata as user and group. 
+#run-as: +# user: suri +# group: suri + +# Some logging module will use that name in event as identifier. The default +# value is the hostname +#sensor-name: suricata + +# Default location of the pid file. The pid file is only used in +# daemon mode (start Suricata with -D). If not running in daemon mode +# the --pidfile command line option must be used to create a pid file. +#pid-file: /var/run/suricata.pid + +# Daemon working directory +# Suricata will change directory to this one if provided +# Default: "/" +#daemon-directory: "/" + +# Umask. +# Suricata will use this umask if it is provided. By default it will use the +# umask passed on by the shell. +#umask: 022 + +# Suricata core dump configuration. Limits the size of the core dump file to +# approximately max-dump. The actual core dump size will be a multiple of the +# page size. Core dumps that would be larger than max-dump are truncated. On +# Linux, the actual core dump size may be a few pages larger than max-dump. +# Setting max-dump to 0 disables core dumping. +# Setting max-dump to 'unlimited' will give the full core dump file. +# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size +# to be 'unlimited'. + +coredump: + max-dump: unlimited + +# If Suricata box is a router for the sniffed networks, set it to 'router'. If +# it is a pure sniffing setup, set it to 'sniffer-only'. +# If set to auto, the variable is internally switch to 'router' in IPS mode +# and 'sniffer-only' in IDS mode. +# This feature is currently only used by the reject* keywords. +host-mode: auto + +# Number of packets preallocated per thread. The default is 1024. A higher number +# will make sure each CPU will be more easily kept busy, but may negatively +# impact caching. +#max-pending-packets: 1024 + +# Runmode the engine should use. Please check --list-runmodes to get the available +# runmodes for each packet acquisition method. Default depends on selected capture +# method. 
'workers' generally gives best performance. +#runmode: autofp + +# Specifies the kind of flow load balancer used by the flow pinned autofp mode. +# +# Supported schedulers are: +# +# hash - Flow assigned to threads using the 5-7 tuple hash. +# ippair - Flow assigned to threads using addresses only. +# +#autofp-scheduler: hash + +# Preallocated size for packet. Default is 1514 which is the classical +# size for pcap on ethernet. You should adjust this value to the highest +# packet size (MTU + hardware header) on your system. +#default-packet-size: 1514 + +# Unix command socket can be used to pass commands to Suricata. +# An external tool can then connect to get information from Suricata +# or trigger some modifications of the engine. Set enabled to yes +# to activate the feature. In auto mode, the feature will only be +# activated in live capture mode. You can use the filename variable to set +# the file name of the socket. +unix-command: + enabled: true + filename: /run/suricata-command.socket + +# Magic file. The extension .mgc is added to the value here. +#magic-file: /usr/share/file/magic +#magic-file: + +# GeoIP2 database file. Specify path and filename of GeoIP2 database +# if using rules with "geoip" rule option. +#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb + +legacy: + uricontent: enabled + +## +## Detection settings +## + +# Set the order of alerts based on actions +# The default order is pass, drop, reject, alert +# action-order: +# - pass +# - drop +# - reject +# - alert + +# IP Reputation +#reputation-categories-file: /etc/suricata/iprep/categories.txt +#default-reputation-path: /etc/suricata/iprep +#reputation-files: +# - reputation.list + +# When run with the option --engine-analysis, the engine will read each of +# the parameters below, and print reports for each of the enabled sections +# and exit. 
The reports are printed to a file in the default log dir +# given by the parameter "default-log-dir", with engine reporting +# subsection below printing reports in its own report file. +engine-analysis: + # enables printing reports for fast-pattern for every rule. + rules-fast-pattern: yes + # enables printing reports for each rule + rules: yes + +#recursion and match limits for PCRE where supported +pcre: + match-limit: 3500 + match-limit-recursion: 1500 + +## +## Advanced Traffic Tracking and Reconstruction Settings +## + +# Host specific policies for defragmentation and TCP stream +# reassembly. The host OS lookup is done using a radix tree, just +# like a routing table so the most specific entry matches. +host-os-policy: + # Make the default policy windows. + windows: [0.0.0.0/0] + bsd: [] + bsd-right: [] + old-linux: [] + linux: [] + old-solaris: [] + solaris: [] + hpux10: [] + hpux11: [] + irix: [] + macos: [] + vista: [] + windows2k3: [] + +# Defrag settings: + +defrag: + memcap: 32mb + hash-size: 65536 + trackers: 65535 # number of defragmented flows to follow + max-frags: 65535 # number of fragments to keep (higher than trackers) + prealloc: yes + timeout: 60 + +# Enable defrag per host settings +# host-config: +# +# - dmz: +# timeout: 30 +# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] +# +# - lan: +# timeout: 45 +# address: +# - 192.168.0.0/24 +# - 192.168.10.0/24 +# - 172.16.14.0/24 + +# Flow settings: +# By default, the reserved memory (memcap) for flows is 32MB. This is the limit +# for flow allocation inside the engine. You can change this value to allow +# more memory usage for flows. +# The hash-size determine the size of the hash used to identify flows inside +# the engine, and by default the value is 65536. +# At the startup, the engine can preallocate a number of flows, to get a better +# performance. The number of flows preallocated is 10000 by default. 
+# emergency-recovery is the percentage of flows that the engine need to +# prune before unsetting the emergency state. The emergency state is activated +# when the memcap limit is reached, allowing to create new flows, but +# pruning them with the emergency timeouts (they are defined below). +# If the memcap is reached, the engine will try to prune flows +# with the default timeouts. If it doesn't find a flow to prune, it will set +# the emergency bit and it will try again with more aggressive timeouts. +# If that doesn't work, then it will try to kill the last time seen flows +# not in use. +# The memcap can be specified in kb, mb, gb. Just a number indicates it's +# in bytes. + +flow: + memcap: 128mb + hash-size: 65536 + prealloc: 10000 + emergency-recovery: 30 + #managers: 1 # default to one flow manager + #recyclers: 1 # default to one flow recycler thread + +# This option controls the use of vlan ids in the flow (and defrag) +# hashing. Normally this should be enabled, but in some (broken) +# setups where both sides of a flow are not tagged with the same vlan +# tag, we can ignore the vlan id's in the flow hashing. +vlan: + use-for-tracking: true + +# Specific timeouts for flows. Here you can specify the timeouts that the +# active flows will wait to transit from the current state to another, on each +# protocol. The value of "new" determine the seconds to wait after a handshake or +# stream startup before the engine free the data of that flow it doesn't +# change the state to established (usually if we don't receive more packets +# of that flow). The value of "established" is the amount of +# seconds that the engine will wait to free the flow if it spend that amount +# without receiving new packets or closing the connection. "closed" is the +# amount of time to wait after a flow is closed (usually zero). "bypassed" +# timeout controls locally bypassed flows. For these flows we don't do any other +# tracking. 
If no packets have been seen after this timeout, the flow is discarded. +# +# There's an emergency mode that will become active under attack circumstances, +# making the engine to check flow status faster. This configuration variables +# use the prefix "emergency-" and work similar as the normal ones. +# Some timeouts doesn't apply to all the protocols, like "closed", for udp and +# icmp. + +flow-timeouts: + + default: + new: 30 + established: 300 + closed: 0 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-closed: 0 + emergency-bypassed: 50 + tcp: + new: 60 + established: 600 + closed: 60 + bypassed: 100 + emergency-new: 5 + emergency-established: 100 + emergency-closed: 10 + emergency-bypassed: 50 + udp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + icmp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + +# Stream engine settings. Here the TCP stream tracking and reassembly +# engine is configured. +# +# stream: +# memcap: 32mb # Can be specified in kb, mb, gb. Just a +# # number indicates it's in bytes. +# checksum-validation: yes # To validate the checksum of received +# # packet. If csum validation is specified as +# # "yes", then packet with invalid csum will not +# # be processed by the engine stream/app layer. +# # Warning: locally generated traffic can be +# # generated without checksum due to hardware offload +# # of checksum. 
You can control the handling of checksum +# # on a per-interface basis via the 'checksum-checks' +# # option +# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread +# midstream: false # don't allow midstream session pickups +# async-oneside: false # don't enable async stream handling +# inline: no # stream inline mode +# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine +# max-synack-queued: 5 # Max different SYN/ACKs to queue +# bypass: no # Bypass packets when stream.reassembly.depth is reached. +# # Warning: first side to reach this triggers +# # the bypass. +# +# reassembly: +# memcap: 64mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# depth: 1mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# randomize-chunk-size: yes # Take a random value for chunk size around the specified value. +# # This lower the risk of some evasion technics but could lead +# # detection change between runs. It is set to 'yes' by default. +# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is +# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size +# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same +# # calculation for toclient-chunk-size. +# # Default value of randomize-chunk-range is 10. +# +# raw: yes # 'Raw' reassembly enabled or disabled. +# # raw is for content inspection by detection +# # engine. 
+# +# segment-prealloc: 2048 # number of segments preallocated per thread +# +# check-overlap-different-data: true|false +# # check if a segment contains different data +# # than what we've already seen for that +# # position in the stream. +# # This is enabled automatically if inline mode +# # is used or when stream-event:reassembly_overlap_different_data; +# # is used in a rule. +# +stream: + memcap: 64mb + checksum-validation: yes # reject wrong csums + inline: auto # auto will use inline mode in IPS mode, yes or no set it statically + reassembly: + memcap: 256mb + depth: 1mb # reassemble 1mb into a stream + toserver-chunk-size: 2560 + toclient-chunk-size: 2560 + randomize-chunk-size: yes + #randomize-chunk-range: 10 + #raw: yes + #segment-prealloc: 2048 + #check-overlap-different-data: true + +# Host table: +# +# Host table is used by tagging and per host thresholding subsystems. +# +host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +# IP Pair table: +# +# Used by xbits 'ippair' tracking. +# +#ippair: +# hash-size: 4096 +# prealloc: 1000 +# memcap: 32mb + +# Decoder settings + +decoder: + # Teredo decoder is known to not be completely accurate + # as it will sometimes detect non-teredo as teredo. + teredo: + enabled: true + # VXLAN decoder is assigned to up to 4 UDP ports. By default only the + # IANA assigned port 4789 is enabled. + vxlan: + enabled: true + ports: $VXLAN_PORTS # syntax: '8472, 4789' + + +## +## Performance tuning and profiling +## + +# The detection engine builds internal groups of signatures. The engine +# allow us to specify the profile to use for them, to manage memory on an +# efficient way keeping a good performance. For the profile keyword you +# can use the words "low", "medium", "high" or "custom". If you use custom +# make sure to define the values at "- custom-values" as your convenience. +# Usually you would prefer medium/high/low. 
+# +# "sgh mpm-context", indicates how the staging should allot mpm contexts for +# the signature groups. "single" indicates the use of a single context for +# all the signature group heads. "full" indicates a mpm-context for each +# group head. "auto" lets the engine decide the distribution of contexts +# based on the information the engine gathers on the patterns from each +# group head. +# +# The option inspection-recursion-limit is used to limit the recursive calls +# in the content inspection code. For certain payload-sig combinations, we +# might end up taking too much time in the content inspection code. +# If the argument specified is 0, the engine uses an internally defined +# default limit. On not specifying a value, we use no limits on the recursion. +detect: + profile: medium + custom-values: + toclient-groups: 3 + toserver-groups: 25 + sgh-mpm-context: auto + inspection-recursion-limit: 3000 + # If set to yes, the loading of signatures will be made after the capture + # is started. This will limit the downtime in IPS mode. + #delayed-detect: yes + + prefilter: + # default prefiltering setting. "mpm" only creates MPM/fast_pattern + # engines. "auto" also sets up prefilter engines for other keywords. + # Use --list-keywords=all to see which keywords support prefiltering. + default: mpm + + # the grouping values above control how many groups are created per + # direction. Port whitelisting forces that port to get it's own group. + # Very common ports will benefit, as well as ports with many expensive + # rules. + grouping: + #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 + #udp-whitelist: 53, 135, 5060 + + profiling: + # Log the rules that made it past the prefilter stage, per packet + # default is off. The threshold setting determines how many rules + # must have made it past pre-filter for that rule to trigger the + # logging. 
+ #inspect-logging-threshold: 200 + grouping: + dump-to-disk: false + include-rules: false # very verbose + include-mpm-stats: false + +# Select the multi pattern algorithm you want to run for scan/search the +# in the engine. +# +# The supported algorithms are: +# "ac" - Aho-Corasick, default implementation +# "ac-bs" - Aho-Corasick, reduced memory implementation +# "ac-ks" - Aho-Corasick, "Ken Steele" variant +# "hs" - Hyperscan, available when built with Hyperscan support +# +# The default mpm-algo value of "auto" will use "hs" if Hyperscan is +# available, "ac" otherwise. +# +# The mpm you choose also decides the distribution of mpm contexts for +# signature groups, specified by the conf - "detect.sgh-mpm-context". +# Selecting "ac" as the mpm would require "detect.sgh-mpm-context" +# to be set to "single", because of ac's memory requirements, unless the +# ruleset is small enough to fit in one's memory, in which case one can +# use "full" with "ac". Rest of the mpms can be run in "full" mode. + +mpm-algo: auto + +# Select the matching algorithm you want to use for single-pattern searches. +# +# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only +# available if Suricata has been built with Hyperscan support). +# +# The default of "auto" will use "hs" if available, otherwise "bm". + +spm-algo: auto + +# Suricata is multi-threaded. Here the threading can be influenced. +threading: + set-cpu-affinity: no + # Tune cpu affinity of threads. Each family of threads can be bound + # on specific CPUs. 
+ # + # These 2 apply to the all runmodes: + # management-cpu-set is used for flow timeout handling, counters + # worker-cpu-set is used for 'worker' threads + # + # Additionally, for autofp these apply: + # receive-cpu-set is used for capture threads + # verdict-cpu-set is used for IPS verdict threads + # + cpu-affinity: + - management-cpu-set: + cpu: [ 0 ] # include only these CPUs in affinity settings + - receive-cpu-set: + cpu: [ 0 ] # include only these CPUs in affinity settings + - worker-cpu-set: + cpu: [ "all" ] + mode: "exclusive" + # Use explicitely 3 threads and don't compute number by using + # detect-thread-ratio variable: + # threads: 3 + prio: + low: [ 0 ] + medium: [ "1-2" ] + high: [ 3 ] + default: "medium" + #- verdict-cpu-set: + # cpu: [ 0 ] + # prio: + # default: "high" + # + # By default Suricata creates one "detect" thread per available CPU/CPU core. + # This setting allows controlling this behaviour. A ratio setting of 2 will + # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this + # will result in 4 detect threads. If values below 1 are used, less threads + # are created. So on a dual core CPU a setting of 0.5 results in 1 detect + # thread being created. Regardless of the setting at a minimum 1 detect + # thread will always be created. + # + detect-thread-ratio: 0.2 + +# Luajit has a strange memory requirement, it's 'states' need to be in the +# first 2G of the process' memory. +# +# 'luajit.states' is used to control how many states are preallocated. +# State use: per detect script: 1 per detect thread. Per output script: 1 per +# script. +luajit: + states: 128 + +# Profiling settings. Only effective if Suricata has been built with the +# the --enable-profiling configure flag. +# +profiling: + # Run profiling for every xth packet. The default is 1, which means we + # profile every packet. If set to 1000, one packet is profiled for every + # 1000 received. 
+ #sample-rate: 1000 + + # rule profiling + rules: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: rule_perf.log + append: yes + + # Sort options: ticks, avgticks, checks, matches, maxticks + # If commented out all the sort options will be used. + #sort: avgticks + + # Limit the number of sids for which stats are shown at exit (per sort). + limit: 10 + + # output to json + json: yes + + # per keyword profiling + keywords: + enabled: yes + filename: keyword_perf.log + append: yes + + prefilter: + enabled: yes + filename: prefilter_perf.log + append: yes + + # per rulegroup profiling + rulegroups: + enabled: yes + filename: rule_group_perf.log + append: yes + + # packet profiling + packets: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: packet_stats.log + append: yes + + # per packet csv output + csv: + + # Output can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: no + filename: packet_stats.csv + + # profiling of locking. Only available when Suricata was built with + # --enable-profiling-locks. + locks: + enabled: no + filename: lock_stats.log + append: yes + + pcap-log: + enabled: no + filename: pcaplog_stats.log + append: yes + +## +## Netfilter integration +## + +# When running in NFQ inline mode, it is possible to use a simulated +# non-terminal NFQUEUE verdict. +# This permit to do send all needed packet to Suricata via this a rule: +# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE +# And below, you can have your standard filtering ruleset. To activate +# this mode, you need to set mode to 'repeat' +# If you want packet to be sent to another queue after an ACCEPT decision +# set mode to 'route' and set next-queue value. 
+# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance +# by processing several packets before sending a verdict (worker runmode only). +# On linux >= 3.6, you can set the fail-open option to yes to have the kernel +# accept the packet if Suricata is not able to keep pace. +# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is +# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask +# on packet of a flow that need to be bypassed. The Nefilter ruleset has to +# directly accept all packets of a flow once a packet has been marked. +nfq: +# mode: accept +# repeat-mark: 1 +# repeat-mask: 1 +# bypass-mark: 1 +# bypass-mask: 1 +# route-queue: 2 +# batchcount: 20 +# fail-open: yes + +#nflog support +nflog: + # netlink multicast group + # (the same as the iptables --nflog-group param) + # Group 0 is used by the kernel, so you can't use it + - group: 2 + # netlink buffer size + buffer-size: 18432 + # put default value here + - group: default + # set number of packet to queue inside kernel + qthreshold: 1 + # set the delay before flushing packet in the queue inside kernel + qtimeout: 100 + # netlink max buffer size + max-size: 20000 + +## +## Advanced Capture Options +## + +# general settings affecting packet capture +capture: + # disable NIC offloading. It's restored when Suricata exits. + # Enabled by default. + #disable-offloading: false + # + # disable checksum validation. Same as setting '-k none' on the + # commandline. + #checksum-validation: none + +# Netmap support +# +# Netmap operates with NIC directly in driver, so you need FreeBSD 11+ which have +# built-in netmap support or compile and install netmap module and appropriate +# NIC driver on your Linux system. +# To reach maximum throughput disable all receive-, segmentation-, +# checksum- offloadings on NIC. +# Disabling Tx checksum offloading is *required* for connecting OS endpoint +# with NIC endpoint. 
+# You can find more information at https://github.com/luigirizzo/netmap +# +netmap: + # To specify OS endpoint add plus sign at the end (e.g. "eth0+") + - interface: eth2 + # Number of capture threads. "auto" uses number of RSS queues on interface. + # Warning: unless the RSS hashing is symmetrical, this will lead to + # accuracy issues. + #threads: auto + # You can use the following variables to activate netmap tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + # To specify the OS as the copy-iface (so the OS can route packets, or forward + # to a service running on the same machine) add a plus sign at the end + # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 + # for return packets. Hardware checksumming must be *off* on the interface if + # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD + # or 'ethtool -K eth0 tx off rx off' for Linux). + #copy-mode: tap + #copy-iface: eth3 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: Suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # BPF filter to apply to this interface. The pcap filter syntax apply here. 
+ #bpf-filter: port 80 or udp + #- interface: eth3 + #threads: auto + #copy-mode: tap + #copy-iface: eth2 + # Put default values here + - interface: default + +# PF_RING configuration. for use with native PF_RING support +# for more info see http://www.ntop.org/products/pf_ring/ +pfring: + - interface: eth0 + # Number of receive threads. If set to 'auto' Suricata will first try + # to use CPU (core) count and otherwise RSS queue count. + threads: auto + + # Default clusterid. PF_RING will load balance packets based on flow. + # All threads/processes that will participate need to have the same + # clusterid. + cluster-id: 99 + + # Default PF_RING cluster type. PF_RING can load balance per flow. + # Possible values are cluster_flow or cluster_round_robin. + cluster-type: cluster_flow + + # bpf filter for this interface + #bpf-filter: tcp + + # If bypass is set then the PF_RING hw bypass is activated, when supported + # by the interface in use. Suricata will instruct the interface to bypass + # all future packets for a flow that need to be bypassed. + #bypass: yes + + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - rxonly: only compute checksum for packets received by network card. + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: Suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # Second interface + #- interface: eth1 + # threads: 3 + # cluster-id: 93 + # cluster-type: cluster_flow + # Put default values here + - interface: default + #threads: 2 + +# For FreeBSD ipfw(8) divert(4) support. 
+# Please make sure you have ipfw_load="YES" and ipdivert_load="YES" +# in /etc/loader.conf or kldload'ing the appropriate kernel modules. +# Additionally, you need to have an ipfw rule for the engine to see +# the packets from ipfw. For Example: +# +# ipfw add 100 divert 8000 ip from any to any +# +# The 8000 above should be the same number you passed on the command +# line, i.e. -d 8000 +# +ipfw: + + # Reinject packets at the specified ipfw rule number. This config + # option is the ipfw rule number AT WHICH rule processing continues + # in the ipfw processing system after the engine has finished + # inspecting the packet for acceptance. If no rule number is specified, + # accepted packets are reinjected at the divert rule which they entered + # and IPFW rule processing continues. No check is done to verify + # this will rule makes sense so care must be taken to avoid loops in ipfw. + # + ## The following example tells the engine to reinject packets + # back into the ipfw firewall AT rule number 5500: + # + # ipfw-reinjection-rule-number: 5500 + + +napatech: + # The Host Buffer Allowance for all streams + # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) + # This may be enabled when sharing streams with another application. + # Otherwise, it should be turned off. + #hba: -1 + + # When use_all_streams is set to "yes" the initialization code will query + # the Napatech service for all configured streams and listen on all of them. + # When set to "no" the streams config array will be used. + # + # This option necessitates running the appropriate NTPL commands to create + # the desired streams prior to running suricata. + #use-all-streams: no + + # The streams to listen on when auto-config is disabled or when and threading + # cpu-affinity is disabled. This can be either: + # an individual stream (e.g. streams: [0]) + # or + # a range of streams (e.g. 
streams: ["0-3"]) + # + streams: ["0-3"] + + # When auto-config is enabled the streams will be created and assigned + # automatically to the NUMA node where the thread resides. If cpu-affinity + # is enabled in the threading section. Then the streams will be created + # according to the number of worker threads specified in the worker cpu set. + # Otherwise, the streams array is used to define the streams. + # + # This option cannot be used simultaneous with "use-all-streams". + # + auto-config: yes + + # Ports indicates which napatech ports are to be used in auto-config mode. + # these are the port ID's of the ports that will be merged prior to the + # traffic being distributed to the streams. + # + # This can be specified in any of the following ways: + # + # a list of individual ports (e.g. ports: [0,1,2,3]) + # + # a range of ports (e.g. ports: [0-3]) + # + # "all" to indicate that all ports are to be merged together + # (e.g. ports: [all]) + # + # This has no effect if auto-config is disabled. + # + ports: [all] + + # When auto-config is enabled the hashmode specifies the algorithm for + # determining to which stream a given packet is to be delivered. + # This can be any valid Napatech NTPL hashmode command. + # + # The most common hashmode commands are: hash2tuple, hash2tuplesorted, + # hash5tuple, hash5tuplesorted and roundrobin. + # + # See Napatech NTPL documentation other hashmodes and details on their use. + # + # This has no effect if auto-config is disabled. + # + hashmode: hash5tuplesorted + +## +## Configure Suricata to load Suricata-Update managed rules. +## +## If this section is completely commented out move down to the "Advanced rule +## file configuration". +## + +default-rule-path: /var/lib/suricata/rules + +rule-files: + - suricata.rules + +## +## Auxiliary configuration files. 
+## + +classification-file: /etc/suricata/classification.config +reference-config-file: /etc/suricata/reference.config +# threshold-file: /etc/suricata/threshold.config + +## +## Include other configs +## + +# Includes. Files included here will be handled as if they were +# inlined in this configuration file. +#include: include1.yaml +#include: include2.yaml diff --git a/pkg/services/suricata/test/suricata_defaults b/pkg/services/suricata/test/suricata_defaults new file mode 100644 index 0000000..d35a3b0 --- /dev/null +++ b/pkg/services/suricata/test/suricata_defaults @@ -0,0 +1,29 @@ +# Default config for Suricata in /etc/default + +# set to yes to start the server in the init.d script +RUN=yes + +# set to user that will run suricata in the init.d script (used for dropping privileges only) +RUN_AS_USER= + +# Configuration file to load +SURCONF=/etc/suricata/suricata.yaml + +# Listen mode: pcap, nfqueue, custom_nfqueue or af-packet +# depending on this value, only one of the two following options +# will be used (af-packet uses neither). 
+# Please note that IPS mode is only available when using nfqueue +LISTENMODE=af-packet + +# Interface to listen on (for pcap mode) +IFACE=vlan104009 + +# Queue number to listen on (for nfqueue mode) +NFQUEUE="-q 0" + +# Queue numbers to listen on (for custom_nfqueue mode) +# Multiple queues can be specified +CUSTOM_NFQUEUE="-q 0 -q 1 -q 2 -q 3" + +# Pid file +PIDFILE=/var/run/suricata.pid diff --git a/pkg/services/tailscale/tailscale.go b/pkg/services/tailscale/tailscale.go new file mode 100644 index 0000000..04747cc --- /dev/null +++ b/pkg/services/tailscale/tailscale.go @@ -0,0 +1,89 @@ +package tailscale + +import ( + "context" + "log/slog" + + systemd_renderer "github.com/metal-stack/os-installer/pkg/systemd-service-renderer" + "github.com/spf13/afero" + + _ "embed" +) + +const ( + tailscaleServiceName = "tailscale.service" + tailscaleServiceUnitPath = "/etc/systemd/system/" + tailscaleServiceName + + tailscaledServiceName = "tailscaled.service" + tailscaledServiceUnitPath = "/etc/systemd/system/" + tailscaledServiceName + + tailscaledDefaultPort = "41641" +) + +var ( + //go:embed tailscale.service.tpl + tailscaleTemplateString string + //go:embed tailscaled.service.tpl + tailscaledTemplateString string +) + +type Config struct { + Log *slog.Logger + Enable bool + Reload bool + fs afero.Fs +} + +type TemplateData struct { + Comment string + DefaultRouteVrf string + TailscaledPort string + MachineID string + AuthKey string + Address string +} + +func WriteSystemdUnit(ctx context.Context, cfg *Config, c *TemplateData) (changed bool, err error) { + if c.TailscaledPort == "" { + c.TailscaledPort = tailscaledDefaultPort + } + + for _, spec := range []struct { + servicePath string + serviceName string + templateString string + }{ + { + servicePath: tailscaleServiceUnitPath, + serviceName: tailscaleServiceName, + templateString: tailscaleTemplateString, + }, + { + servicePath: tailscaledServiceUnitPath, + serviceName: tailscaledServiceName, + templateString: 
tailscaledTemplateString, + }, + } { + r, err := systemd_renderer.New(&systemd_renderer.Config{ + ServiceName: spec.serviceName, + Enable: cfg.Enable, + Log: cfg.Log, + TemplateString: spec.templateString, + Data: c, + Fs: cfg.fs, + }) + if err != nil { + return false, err + } + + chg, err := r.Render(ctx, spec.servicePath, cfg.Reload) + if err != nil { + return chg, err + } + + // return changed if one has changed + changed = changed || chg + } + + return changed, nil +} diff --git a/pkg/network/tpl/tailscale.service.tpl b/pkg/services/tailscale/tailscale.service.tpl similarity index 91% rename from pkg/network/tpl/tailscale.service.tpl rename to pkg/services/tailscale/tailscale.service.tpl index 2364d65..a026fe5 100644 --- a/pkg/network/tpl/tailscale.service.tpl +++ b/pkg/services/tailscale/tailscale.service.tpl @@ -10,4 +10,4 @@ ExecStart=/bin/ip vrf exec {{ .DefaultRouteVrf }} /usr/local/bin/tailscale up -- Restart=on-failure [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/pkg/services/tailscale/tailscale_test.go b/pkg/services/tailscale/tailscale_test.go new file mode 100644 index 0000000..c5dab42 --- /dev/null +++ b/pkg/services/tailscale/tailscale_test.go @@ -0,0 +1,83 @@ +package tailscale + +import ( + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed test/tailscale.service + expectedTailscaleSystemdUnit string + //go:embed test/tailscaled.service + expectedTailscaledSystemdUnit string +) + +func TestWriteSystemdUnit(t *testing.T) { + tests := []struct { + name string + c *TemplateData + wantTailscaleService string + wantTailscaledService string + wantChanged bool + wantErr error + }{ + { + name: "render", + c: &TemplateData{ + Comment: `Do not edit.`, + DefaultRouteVrf: "vrf104009", + 
TailscaledPort: "41641", + MachineID: "c0115b51-5e4d-4f92-85c8-1cc504eafdd2", + AuthKey: "a-authkey", + Address: "headscale.metal-stack.io", + }, + wantTailscaleService: expectedTailscaleSystemdUnit, + wantTailscaledService: expectedTailscaledSystemdUnit, + wantChanged: true, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + gotChanged, gotErr := WriteSystemdUnit(t.Context(), &Config{ + Log: slog.Default(), + Reload: false, + fs: fs, + }, tt.c) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(tailscaleServiceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantTailscaleService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + + content, err = fs.ReadFile(tailscaledServiceUnitPath) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantTailscaledService, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/network/tpl/tailscaled.service.tpl b/pkg/services/tailscale/tailscaled.service.tpl similarity index 96% rename from pkg/network/tpl/tailscaled.service.tpl rename to pkg/services/tailscale/tailscaled.service.tpl index 3db63ad..66a4983 100644 --- a/pkg/network/tpl/tailscaled.service.tpl +++ b/pkg/services/tailscale/tailscaled.service.tpl @@ -22,4 +22,4 @@ CacheDirectory=tailscale CacheDirectoryMode=0750 [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/pkg/services/tailscale/test/tailscale.service b/pkg/services/tailscale/test/tailscale.service new file mode 100644 index 0000000..10a9d7e --- /dev/null +++ b/pkg/services/tailscale/test/tailscale.service @@ -0,0 +1,13 @@ +[Unit] +Description=Tailscale client 
+After=tailscaled.service + +[Service] +LimitMEMLOCK=infinity +User=root +Group=root +ExecStart=/bin/ip vrf exec vrf104009 /usr/local/bin/tailscale up --hostname c0115b51-5e4d-4f92-85c8-1cc504eafdd2 --auth-key a-authkey --login-server headscale.metal-stack.io +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/pkg/services/tailscale/test/tailscaled.service b/pkg/services/tailscale/test/tailscaled.service new file mode 100644 index 0000000..7d70c44 --- /dev/null +++ b/pkg/services/tailscale/test/tailscaled.service @@ -0,0 +1,25 @@ +[Unit] +Description=Tailscale node agent +Documentation=https://tailscale.com/kb/ +After=network.target + +[Service] +LimitMEMLOCK=infinity +User=root +Group=root +Type=notify +Environment="TS_NO_LOGS_NO_SUPPORT=true" +ExecStartPre=ip vrf exec vrf104009 /usr/local/bin/tailscaled --cleanup +ExecStart=/bin/ip vrf exec vrf104009 /usr/local/bin/tailscaled --port 41641 +ExecStopPost=ip vrf exec vrf104009 /usr/local/bin/tailscaled --cleanup +Restart=on-failure + +RuntimeDirectory=tailscale +RuntimeDirectoryMode=0755 +StateDirectory=tailscale +StateDirectoryMode=0700 +CacheDirectory=tailscale +CacheDirectoryMode=0750 + +[Install] +WantedBy=multi-user.target diff --git a/pkg/systemd-service-renderer/systemd_renderer.go b/pkg/systemd-service-renderer/systemd_renderer.go new file mode 100644 index 0000000..7354b71 --- /dev/null +++ b/pkg/systemd-service-renderer/systemd_renderer.go @@ -0,0 +1,128 @@ +package systemd_renderer + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/coreos/go-systemd/v22/dbus" + "github.com/metal-stack/os-installer/pkg/exec" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/spf13/afero" +) + +type ( + Config struct { + ServiceName string + Enable bool + Log *slog.Logger + TemplateString string + Data any + // Validate allows the validation of the rendered template on a given temp file path, optional + Validate func(path string) error + Fs afero.Fs 
+ } + + systemdRenderer struct { + log *slog.Logger + r *renderer.Renderer + serviceName string + enable bool + } +) + +// New returns a new system service renderer +func New(c *Config) (*systemdRenderer, error) { + if c == nil { + return nil, fmt.Errorf("systemd service renderer config is nil") + } + + r, err := renderer.New(&renderer.Config{ + Log: c.Log.With("service-name", c.ServiceName), + TemplateString: c.TemplateString, + Data: c.Data, + Validate: c.Validate, + Fs: c.Fs, + }) + if err != nil { + return nil, err + } + + return &systemdRenderer{ + log: c.Log.WithGroup("systemd-service-renderer").With("service-name", c.ServiceName), + serviceName: c.ServiceName, + r: r, + enable: c.Enable, + }, nil +} + +// Render renders the given template to the given destination and reloads the unit if requested. +// Returns true when the template has changed. +func (r *systemdRenderer) Render(ctx context.Context, destFile string, reload bool) (changed bool, err error) { + r.log.Info("rendering systemd service template file") + + changed, err = r.r.Render(ctx, destFile) + if err != nil { + return changed, err + } + + if r.enable { + if err := Enable(ctx, r.log, r.serviceName); err != nil { + return changed, err + } + } + + if !reload { + return changed, nil + } + + if err := Reload(ctx, r.log, r.serviceName); err != nil { + return true, err + } + + return true, err +} + +func Reload(ctx context.Context, log *slog.Logger, unitName string) error { + const done = "done" + + log.Info("reloading systemd service unit", "unit", unitName) + + dbc, err := dbus.NewWithContext(ctx) + if err != nil { + return fmt.Errorf("unable to connect to dbus to reload unit:%s %w", unitName, err) + } + defer dbc.Close() + + c := make(chan string) + + if _, err = dbc.ReloadUnitContext(ctx, unitName, "replace", c); err != nil { + return err + } + + job := <-c + + if job != done { + return fmt.Errorf("reloading of unit:%s failed: %s", unitName, job) + } + + return nil +} + +func Enable(ctx 
context.Context, log *slog.Logger, unitName string) error { + log.Info("enable systemd service unit", "unit-name", unitName) + + ex := exec.New(log) + + out, err := ex.Execute(ctx, &exec.Params{ + Name: "bash", + Args: []string{"-c", fmt.Sprintf("systemctl enable %s", unitName)}, + Timeout: 10 * time.Second, + }) + if err != nil { + // Do not error out because some service can be enabled, but the enable command returns an error. + log.Error("unable to enable systemd unit", "unit", unitName, "output", out, "error", err) + } + return nil +} diff --git a/pkg/template-renderer/renderer.go b/pkg/template-renderer/renderer.go new file mode 100644 index 0000000..f945e66 --- /dev/null +++ b/pkg/template-renderer/renderer.go @@ -0,0 +1,149 @@ +package renderer + +import ( + "bufio" + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "log/slog" + "os" + "text/template" + + "github.com/Masterminds/sprig/v3" + "github.com/google/uuid" + "github.com/spf13/afero" +) + +// Config provides a config for the template Renderer +type ( + Config struct { + Log *slog.Logger + TemplateString string + Data any + // Validate allows the validation of the rendered template on a given temp file path, optional + Validate func(path string) error + Fs afero.Fs + } + + Renderer struct { + fs afero.Afero + log *slog.Logger + tpl *template.Template + data any + validateFn func(path string) error + } +) + +// New returns a new template renderer +func New(c *Config) (*Renderer, error) { + if c == nil { + return nil, fmt.Errorf("renderer config is nil") + } + + tpl, err := template.New("tpl").Funcs(sprig.FuncMap()).Parse(c.TemplateString) + if err != nil { + return nil, err + } + + fs := afero.NewOsFs() + if c.Fs != nil { + fs = c.Fs + } + + return &Renderer{ + log: c.Log.WithGroup("template-renderer"), + tpl: tpl, + data: c.Data, + validateFn: c.Validate, + fs: afero.Afero{ + Fs: fs, + }, + }, nil +} + +// Render renders the given template to the given destination. 
+// Returns true when the template has changed. +func (r *Renderer) Render(ctx context.Context, destFile string) (changed bool, err error) { + r.log.Info("rendering template file", "destination", destFile, "data", r.data) + + stagingFile := fmt.Sprintf("%s-%s", destFile, uuid.New().String()) + + f, err := r.fs.Create(stagingFile) + if err != nil { + return false, err + } + + defer func() { + if err := f.Close(); err != nil { + r.log.Error("unable to close file", "error", err) + } + + if removeErr := r.fs.Remove(stagingFile); removeErr != nil && !os.IsNotExist(removeErr) { + r.log.Error("unable to remove staging file", "error", removeErr) + err = removeErr + } + }() + + w := bufio.NewWriter(f) + + if err = r.tpl.Execute(w, r.data); err != nil { + return false, err + } + + if err = w.Flush(); err != nil { + return false, err + } + + if r.validateFn != nil { + if err := r.validateFn(f.Name()); err != nil { + return false, err + } + + r.log.Debug("validated template successfully") + } + + if equal := r.compare(f.Name(), destFile); equal { + return false, nil + } + + if err = r.fs.Rename(f.Name(), destFile); err != nil { + return false, err + } + + return true, err +} + +func (r *Renderer) compare(source, target string) bool { + sourceChecksum, err := r.checksum(source) + if err != nil { + return false + } + + targetChecksum, err := r.checksum(target) + if err != nil { + return false + } + + return bytes.Equal(sourceChecksum, targetChecksum) +} + +func (r *Renderer) checksum(file string) ([]byte, error) { + f, err := r.fs.Open(file) + if err != nil { + return nil, err + } + + defer func() { + _ = f.Close() + }() + + h := sha256.New() + + if _, err := io.Copy(h, f); err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/pkg/template-renderer/renderer_test.go b/pkg/template-renderer/renderer_test.go new file mode 100644 index 0000000..eab04f3 --- /dev/null +++ b/pkg/template-renderer/renderer_test.go @@ -0,0 +1,124 @@ +package renderer_test + 
+import ( + "fmt" + "log/slog" + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + renderer "github.com/metal-stack/os-installer/pkg/template-renderer" + "github.com/metal-stack/os-installer/pkg/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_renderer_Render(t *testing.T) { + tests := []struct { + name string + c *renderer.Config + destFile string + fsMock func(fs afero.Afero) + wantRendered string + wantChanged bool + wantErr error + }{ + { + name: "render an initial unit", + c: &renderer.Config{ + TemplateString: "{{ .Hostname }}", + Data: map[string]string{ + "Hostname": "foo", + }, + }, + destFile: "/hostname", + wantRendered: "foo", + wantChanged: true, + wantErr: nil, + }, + { + name: "render an initial unit and call validation func", + c: &renderer.Config{ + TemplateString: "{{ .Hostname }}", + Data: map[string]string{ + "Hostname": "foo", + }, + Validate: func(path string) error { + assert.True(t, strings.HasPrefix(path, "/hostname")) + return fmt.Errorf("a validation error") + }, + }, + destFile: "/hostname", + wantRendered: "", + wantChanged: false, + wantErr: fmt.Errorf("a validation error"), + }, + { + name: "update existing file", + c: &renderer.Config{ + TemplateString: "{{ .Hostname }}", + Data: map[string]string{ + "Hostname": "foo", + }, + }, + destFile: "/hostname", + fsMock: func(fs afero.Afero) { + require.NoError(t, fs.WriteFile("/hostname", []byte("bar"), os.ModePerm)) + }, + wantRendered: "foo", + wantChanged: true, + wantErr: nil, + }, + { + name: "update existing file that did not change", + c: &renderer.Config{ + TemplateString: "{{ .Hostname }}", + Data: map[string]string{ + "Hostname": "foo", + }, + }, + destFile: "/hostname", + fsMock: func(fs afero.Afero) { + require.NoError(t, fs.WriteFile("/hostname", []byte("foo"), os.ModePerm)) + }, + wantRendered: "foo", + wantChanged: false, + wantErr: nil, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + tt.c.Log = slog.Default() + fs := afero.Afero{Fs: afero.NewMemMapFs()} + tt.c.Fs = fs + + r, err := renderer.New(tt.c) + require.NoError(t, err) + + if tt.fsMock != nil { + tt.fsMock(fs) + } + + gotChanged, gotErr := r.Render(t.Context(), tt.destFile) + + assert.Equal(t, tt.wantChanged, gotChanged) + + if diff := cmp.Diff(tt.wantErr, gotErr, test.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (+got -want):\n%s", diff) + } + + if tt.wantErr != nil { + return + } + + content, err := fs.ReadFile(tt.destFile) + require.NoError(t, err) + + if diff := cmp.Diff(tt.wantRendered, string(content)); diff != "" { + t.Errorf("diff (+got -want):\n%s", diff) + } + }) + } +} diff --git a/pkg/test/common.go b/pkg/test/common.go new file mode 100644 index 0000000..3b36baa --- /dev/null +++ b/pkg/test/common.go @@ -0,0 +1,18 @@ +package test + +import "github.com/google/go-cmp/cmp" + +func ErrorStringComparer() cmp.Option { + return cmp.Comparer(func(x, y error) bool { + if x == nil && y == nil { + return true + } + if x == nil && y != nil { + return false + } + if x != nil && y == nil { + return false + } + return x.Error() == y.Error() + }) +} diff --git a/cmdexec_test.go b/pkg/test/fakeexec.go similarity index 66% rename from cmdexec_test.go rename to pkg/test/fakeexec.go index eba25a6..64f5def 100644 --- a/cmdexec_test.go +++ b/pkg/test/fakeexec.go @@ -1,9 +1,8 @@ -package main +package test import ( "context" "encoding/json" - "fmt" "os" "os/exec" "testing" @@ -12,26 +11,27 @@ import ( "github.com/stretchr/testify/require" ) -// tests were inspired by this blog article: https://npf.io/2015/06/testing-exec-command/ +// inspired by this blog article: https://npf.io/2015/06/testing-exec-command/ type fakeexec struct { t *testing.T mockCount int - mocks []fakeexecparams + mocks []FakeExecParams } // nolint:musttag -type fakeexecparams struct { +type FakeExecParams struct { WantCmd []string `json:"want_cmd"` Output string 
`json:"output"` ExitCode int `json:"exit_code"` } -func fakeCmd(t *testing.T, params ...fakeexecparams) func(ctx context.Context, command string, args ...string) *exec.Cmd { +func FakeCmd(t *testing.T, params ...FakeExecParams) func(ctx context.Context, command string, args ...string) *exec.Cmd { f := fakeexec{ t: t, mocks: params, } + return f.command } @@ -51,20 +51,6 @@ func (f *fakeexec) command(ctx context.Context, command string, args ...string) cs := []string{"-test.run=TestHelperProcess", "--", string(j)} cmd := exec.CommandContext(ctx, os.Args[0], cs...) //nolint cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd -} - -func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - - var f fakeexecparams - err := json.Unmarshal([]byte(os.Args[3]), &f) - require.NoError(t, err) - _, err = fmt.Fprint(os.Stdout, f.Output) - require.NoError(t, err) - - os.Exit(f.ExitCode) + return cmd } diff --git a/pkg/test/fakeexec_test.go b/pkg/test/fakeexec_test.go new file mode 100644 index 0000000..d2c94a2 --- /dev/null +++ b/pkg/test/fakeexec_test.go @@ -0,0 +1,25 @@ +package test + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + var f FakeExecParams + err := json.Unmarshal([]byte(os.Args[3]), &f) + require.NoError(t, err) + + _, err = fmt.Fprint(os.Stdout, f.Output) + require.NoError(t, err) + + os.Exit(f.ExitCode) +} diff --git a/templates/template.go b/templates/template.go deleted file mode 100644 index f25c9ab..0000000 --- a/templates/template.go +++ /dev/null @@ -1,30 +0,0 @@ -package templates - -import ( - "bytes" - _ "embed" - "text/template" - - "github.com/metal-stack/metal-go/api/models" -) - -type Chrony struct { - NTPServers []*models.V1NTPServer -} - -//go:embed chrony.conf.tpl -var chronyTemplate string - -func RenderChronyTemplate(chronyConfig 
Chrony) (string, error) { - templ, err := template.New("chrony").Parse(chronyTemplate) - if err != nil { - return "error parsing template", err - } - - rendered := new(bytes.Buffer) - err = templ.Execute(rendered, chronyConfig) - if err != nil { - return "error writing to template file", err - } - return rendered.String(), nil -} diff --git a/templates/template_test.go b/templates/template_test.go deleted file mode 100644 index d84cab3..0000000 --- a/templates/template_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package templates - -import ( - _ "embed" - "os" - "testing" - - "github.com/metal-stack/metal-go/api/models" - "github.com/stretchr/testify/require" -) - -func TestDefaultChronyTemplate(t *testing.T) { - defaultNTPServer := "time.cloudflare.com" - ntpServers := []*models.V1NTPServer{ - { - Address: &defaultNTPServer, - }, - } - - rendered := renderToString(t, Chrony{NTPServers: ntpServers}) - expected := readExpected(t, "test_data/defaultntp/chrony.conf") - - require.Equal(t, expected, rendered, "Wanted: %s\nGot: %s", expected, rendered) -} - -func TestCustomChronyTemplate(t *testing.T) { - customNTPServer := "custom.1.ntp.org" - ntpServers := []*models.V1NTPServer{ - { - Address: &customNTPServer, - }, - } - - rendered := renderToString(t, Chrony{NTPServers: ntpServers}) - expected := readExpected(t, "test_data/customntp/chrony.conf") - - require.Equal(t, expected, rendered, "Wanted: %s\nGot: %s", expected, rendered) -} - -func readExpected(t *testing.T, e string) string { - ex, err := os.ReadFile(e) - require.NoError(t, err, "Couldn't read %s", e) - return string(ex) -} - -func renderToString(t *testing.T, c Chrony) string { - r, err := RenderChronyTemplate(c) - require.NoError(t, err, "Could not render chrony configuration") - return r -} diff --git a/pkg/network/validate.sh b/validate.sh similarity index 75% rename from pkg/network/validate.sh rename to validate.sh index f2d62a2..4ac7850 100755 --- a/pkg/network/validate.sh +++ b/validate.sh @@ -4,7 +4,7 
@@ set -e validate () { echo "----------------------------------------------------------------" - echo "Validating sample artifacts of metal-networker with ${1}:${2} frr:${3}" + echo "Validating sample artifacts of os-installer with ${1}:${2} frr:${3}" echo "----------------------------------------------------------------" tag="${1}_${2}_${3}" docker build \ @@ -13,7 +13,7 @@ validate () { --build-arg FRR_VERSION="${3}" \ --build-arg FRR_APT_CHANNEL="${4}" \ --file Dockerfile.validate \ - . -t metal-networker-validate:${tag} + . -t os-installer:${tag} docker run --interactive \ --rm \ @@ -21,8 +21,8 @@ validate () { --cap-add=NET_ADMIN \ --cap-add=NET_RAW \ --name vali \ - --volume ./testdata:/testdata \ - metal-networker-validate:${tag} /validate_os.sh + --volume ./pkg:/testdata:ro \ + os-installer:${tag} /validate_os.sh } validate "ubuntu" "24.04" "frr-10.4" "noble" diff --git a/pkg/network/validate_os.sh b/validate_os.sh similarity index 70% rename from pkg/network/validate_os.sh rename to validate_os.sh index 67c1d19..a6029f9 100755 --- a/pkg/network/validate_os.sh +++ b/validate_os.sh @@ -1,27 +1,27 @@ #!/bin/bash -testcases="/testdata/frr.conf.*" +testcases="/testdata/frr/test/frr.conf.*" for tc in $testcases; do echo -n "Testing ${FRR_VERSION} on ${OS_NAME}:${OS_VERSION} with input ${tc}: " if vtysh --dryrun --inputfile "${tc}"; then - printf "\e[32m\xE2\x9C\x94\e[0m\n" + echo "✅" else - printf "\e[31m\xE2\x9D\x8C\e[0m\n" + echo "❌" echo "FRR ${FRR_VERSION} on ${OS_NAME}:${OS_VERSION} produces an invalid configuration" exit 1 fi done -testcases="/testdata/nftrules*" +testcases="/testdata/nftables/test/nftrules*" for tc in $testcases; do echo -n "Testing nft rules on ${OS_NAME}:${OS_VERSION} with input ${tc}: " if nft -c -f "${tc}"; then - printf "\e[32m\xE2\x9C\x94\e[0m\n" + echo "✅" else - printf "\e[31m\xE2\x9D\x8C\e[0m\n" + echo "❌" echo "nft input ${tc} on ${OS_NAME}:${OS_VERSION} produces an invalid configuration" exit 1 fi -done \ No newline at end of 
file +done