Revert "Enabling BPFLSM based KSP protection on Kubearmor itself" #1896

Open · wants to merge 1 commit into base: main
8 changes: 3 additions & 5 deletions KubeArmor/BPF/enforcer.bpf.c
@@ -75,10 +75,7 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) {
if (src_offset == NULL)
fromSourceCheck = false;

void *src_ptr;
if (src_buf->buf[*src_offset]) {
src_ptr = &src_buf->buf[*src_offset];
}
void *src_ptr = &src_buf->buf[*src_offset];
if (src_ptr == NULL)
fromSourceCheck = false;

@@ -155,9 +152,10 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) {
goto decision;
}


// match exec name
struct qstr d_name;
d_name = BPF_CORE_READ(f_path.dentry, d_name);
d_name = BPF_CORE_READ(f_path.dentry,d_name);
bpf_map_update_elem(&bufk, &two, z, BPF_ANY);
bpf_probe_read_str(pk->path, MAX_STRING_SIZE, d_name.name);

31 changes: 15 additions & 16 deletions KubeArmor/BPF/shared.h
@@ -272,9 +272,6 @@ static inline void get_outer_key(struct outer_key *pokey,
struct task_struct *t) {
pokey->pid_ns = get_task_pid_ns_id(t);
pokey->mnt_ns = get_task_mnt_ns_id(t);
// TODO: Use cgroup ns as well for host process identification to support enforcement on deployments using hostpidns
// u32 cg_ns = BPF_CORE_READ(t, nsproxy, cgroup_ns, ns).inum;
// if (pokey->pid_ns == PROC_PID_INIT_INO && cg_ns == PROC_CGROUP_INIT_INO) {
if (pokey->pid_ns == PROC_PID_INIT_INO) {
pokey->pid_ns = 0;
pokey->mnt_ns = 0;
@@ -291,13 +288,20 @@ static __always_inline u32 init_context(event *event_data) {
event_data->host_ppid = get_task_ppid(task);
event_data->host_pid = bpf_get_current_pid_tgid() >> 32;

struct outer_key okey;
get_outer_key(&okey, task);
event_data->pid_id = okey.pid_ns;
event_data->mnt_id = okey.mnt_ns;
u32 pid = get_task_ns_tgid(task);
if (event_data->host_pid == pid) { // host
event_data->pid_id = 0;
event_data->mnt_id = 0;

event_data->ppid = get_task_ppid(task);
event_data->pid = bpf_get_current_pid_tgid() >> 32;
} else { // container
event_data->pid_id = get_task_pid_ns_id(task);
event_data->mnt_id = get_task_mnt_ns_id(task);

event_data->ppid = get_task_ppid(task);
event_data->pid = get_task_ns_tgid(task);
event_data->ppid = get_task_ns_ppid(task);
event_data->pid = pid;
}

event_data->uid = bpf_get_current_uid_gid();

@@ -483,15 +487,10 @@ static inline int match_and_enforce_path_hooks(struct path *f_path, u32 id,
if (src_offset == NULL)
fromSourceCheck = false;

void *src_ptr;
if (src_buf->buf[*src_offset]) {
src_ptr = &src_buf->buf[*src_offset];
}
if (src_ptr == NULL)
fromSourceCheck = false;
void *ptr = &src_buf->buf[*src_offset];

if (fromSourceCheck) {
bpf_probe_read_str(store->source, MAX_STRING_SIZE, src_ptr);
bpf_probe_read_str(store->source, MAX_STRING_SIZE, ptr);

val = bpf_map_lookup_elem(inner, store);

9 changes: 0 additions & 9 deletions KubeArmor/config/config.go
@@ -60,8 +60,6 @@ type KubearmorConfig struct {
MaxAlertPerSec int // Maximum alerts allowed per second
ThrottleSec int // Number of seconds for which subsequent alerts will be dropped
AnnotateResources bool // enable annotations by kubearmor if kubearmor-controller is not present

ProcFsMount string // path where procfs is hosted
}

// GlobalCfg Global configuration for Kubearmor
@@ -107,7 +105,6 @@ const (
ConfigMaxAlertPerSec string = "maxAlertPerSec"
ConfigThrottleSec string = "throttleSec"
ConfigAnnotateResources string = "annotateResources"
ConfigProcFsMount string = "procfsMount"
)

func readCmdLineParams() {
@@ -164,8 +161,6 @@ func readCmdLineParams() {

annotateResources := flag.Bool(ConfigAnnotateResources, false, "for kubearmor deployment without kubearmor-controller")

procFsMount := flag.String(ConfigProcFsMount, "/proc", "Path to the BPF filesystem to use for storing maps")

flags := []string{}
flag.VisitAll(func(f *flag.Flag) {
kv := fmt.Sprintf("%s:%v", f.Name, f.Value)
@@ -227,8 +222,6 @@ func readCmdLineParams() {
viper.SetDefault(ConfigThrottleSec, *throttleSec)

viper.SetDefault(ConfigAnnotateResources, *annotateResources)

viper.SetDefault(ConfigProcFsMount, *procFsMount)
}

// LoadConfig Load configuration
@@ -329,8 +322,6 @@ func LoadConfig() error {
GlobalCfg.ThrottleSec = viper.GetInt(ConfigThrottleSec)
GlobalCfg.AnnotateResources = viper.GetBool(ConfigAnnotateResources)

GlobalCfg.ProcFsMount = viper.GetString(ConfigProcFsMount)

kg.Printf("Final Configuration [%+v]", GlobalCfg)

return nil
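For context, the reverted procfsMount option followed this file's usual flag-plus-viper pattern: register a command-line flag, seed viper with the parsed value, then read it back when loading the final configuration. A minimal standalone sketch of that pattern (illustrative names, not the exact KubeArmor wiring):

// Sketch of the flag-to-viper pattern used by the removed procfsMount option.
package main

import (
	"flag"
	"fmt"

	"github.com/spf13/viper"
)

const configProcFsMount = "procfsMount" // option key, as in the reverted change

func main() {
	// register the command-line flag with its default value
	procFsMount := flag.String(configProcFsMount, "/proc", "path where procfs is mounted")
	flag.Parse()

	// seed viper so config files or environment variables can still override it
	viper.SetDefault(configProcFsMount, *procFsMount)

	// the daemon later reads the effective value back from viper
	fmt.Println("procfs mount:", viper.GetString(configProcFsMount))
}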
5 changes: 2 additions & 3 deletions KubeArmor/core/containerdHandler.go
@@ -8,7 +8,6 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@@ -194,13 +193,13 @@ func (ch *ContainerdHandler) GetContainerInfo(ctx context.Context, containerID s

pid := strconv.Itoa(int(taskRes.Processes[0].Pid))

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
}

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
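The readlink calls restored above resolve a container's namespace IDs directly from procfs: /proc/<pid>/ns/pid and /proc/<pid>/ns/mnt are symlinks to strings such as pid:[4026531836], and Sscanf extracts the inode number. A self-contained sketch of that pattern, assuming the host's procfs is visible at /proc:

// Sketch of the /proc/<pid>/ns readlink pattern restored by this revert.
package main

import (
	"fmt"
	"os"
)

func namespaceIDs(pid string) (pidNS, mntNS uint32, err error) {
	data, err := os.Readlink("/proc/" + pid + "/ns/pid")
	if err != nil {
		return 0, 0, err
	}
	if _, err := fmt.Sscanf(data, "pid:[%d]", &pidNS); err != nil {
		return 0, 0, err
	}
	data, err = os.Readlink("/proc/" + pid + "/ns/mnt")
	if err != nil {
		return 0, 0, err
	}
	if _, err := fmt.Sscanf(data, "mnt:[%d]", &mntNS); err != nil {
		return 0, 0, err
	}
	return pidNS, mntNS, nil
}

func main() {
	pidNS, mntNS, err := namespaceIDs("1") // PID 1 on the host, for illustration
	if err != nil {
		fmt.Println("could not read namespace links:", err)
		return
	}
	fmt.Printf("pid ns: %d, mnt ns: %d\n", pidNS, mntNS)
}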
5 changes: 2 additions & 3 deletions KubeArmor/core/crioHandler.go
@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"

@@ -131,15 +130,15 @@ func (ch *CrioHandler) GetContainerInfo(ctx context.Context, containerID string,

pid := strconv.Itoa(containerInfo.Pid)

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
} else {
return container, err
}

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
5 changes: 2 additions & 3 deletions KubeArmor/core/dockerHandler.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
@@ -145,13 +144,13 @@ func (dh *DockerHandler) GetContainerInfo(containerID string, OwnerInfo map[stri

pid := strconv.Itoa(inspect.State.Pid)

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
}

if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
6 changes: 3 additions & 3 deletions KubeArmor/core/kubeUpdate.go
@@ -731,9 +731,9 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
}

// exception: kubearmor
// if _, ok := pod.Labels["kubearmor-app"]; ok {
// pod.Annotations["kubearmor-policy"] = "audited"
// }
if _, ok := pod.Labels["kubearmor-app"]; ok {
pod.Annotations["kubearmor-policy"] = "audited"
}

// == Visibility == //

4 changes: 2 additions & 2 deletions KubeArmor/enforcer/appArmorEnforcer.go
@@ -114,11 +114,11 @@ profile apparmor-default flags=(attach_disconnected,mediate_deleted) {

existingProfiles := []string{}

if pids, err := os.ReadDir(filepath.Clean(cfg.GlobalCfg.ProcFsMount)); err == nil {
if pids, err := os.ReadDir(filepath.Clean("/proc")); err == nil {
for _, f := range pids {
if f.IsDir() {
if _, err := strconv.Atoi(f.Name()); err == nil {
if content, err := os.ReadFile(filepath.Clean(cfg.GlobalCfg.ProcFsMount + "/" + f.Name() + "/attr/current")); err == nil {
if content, err := os.ReadFile(filepath.Clean("/proc/" + f.Name() + "/attr/current")); err == nil {
line := strings.Split(string(content), "\n")[0]
words := strings.Split(line, " ")

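The restored profile scan walks /proc, keeps the numeric entries (PIDs), and reads each task's AppArmor label from /proc/<pid>/attr/current, taking the first word of the first line. A standalone sketch of the same walk, trimmed to the parsing shown in the diff:

// Sketch of the procfs walk used to collect AppArmor labels currently in use.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

func currentAppArmorLabels() []string {
	labels := []string{}
	pids, err := os.ReadDir(filepath.Clean("/proc"))
	if err != nil {
		return labels
	}
	for _, f := range pids {
		if !f.IsDir() {
			continue
		}
		if _, err := strconv.Atoi(f.Name()); err != nil {
			continue // not a PID directory
		}
		content, err := os.ReadFile(filepath.Clean("/proc/" + f.Name() + "/attr/current"))
		if err != nil {
			continue
		}
		// entries look like "some-profile (enforce)" or "unconfined"
		line := strings.Split(string(content), "\n")[0]
		labels = append(labels, strings.Split(line, " ")[0])
	}
	return labels
}

func main() {
	fmt.Println(currentAppArmorLabels())
}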
4 changes: 2 additions & 2 deletions KubeArmor/enforcer/bpflsm/enforcer.go
@@ -23,8 +23,8 @@ import (
tp "github.com/kubearmor/KubeArmor/KubeArmor/types"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g

// ===================== //
// == BPFLSM Enforcer == //
6 changes: 1 addition & 5 deletions KubeArmor/main_test.go
@@ -13,7 +13,7 @@ import (

var clusterPtr, gRPCPtr, logPathPtr *string
var enableKubeArmorPolicyPtr, enableKubeArmorHostPolicyPtr, enableKubeArmorVMPtr, coverageTestPtr, enableK8sEnv, tlsEnabled *bool
var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr, procFsMountPtr *string
var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr *string

func init() {
// options (string)
@@ -32,8 +32,6 @@ func init() {
hostDefaultNetworkPosturePtr = flag.String("hostDefaultNetworkPosture", "block", "configuring default enforcement action in global network context {allow|audit|block}")
hostDefaultCapabilitiesPosturePtr = flag.String("hostDefaultCapabilitiesPosture", "block", "configuring default enforcement action in global capability context {allow|audit|block}")

procFsMountPtr = flag.String("procfsMount", "/proc", "Path to the BPF filesystem to use for storing maps")

// options (boolean)
enableKubeArmorPolicyPtr = flag.Bool("enableKubeArmorPolicy", true, "enabling KubeArmorPolicy")
enableKubeArmorHostPolicyPtr = flag.Bool("enableKubeArmorHostPolicy", true, "enabling KubeArmorHostPolicy")
@@ -44,7 +42,6 @@ func init() {

// options (boolean)
coverageTestPtr = flag.Bool("coverageTest", false, "enabling CoverageTest")

}

// TestMain - test to drive external testing coverage
@@ -67,7 +64,6 @@ func TestMain(t *testing.T) {
fmt.Sprintf("-enableKubeArmorHostPolicy=%s", strconv.FormatBool(*enableKubeArmorHostPolicyPtr)),
fmt.Sprintf("-coverageTest=%s", strconv.FormatBool(*coverageTestPtr)),
fmt.Sprintf("-tlsEnabled=%s", strconv.FormatBool(*tlsEnabled)),
fmt.Sprintf("-procfsMount=%s", *procFsMountPtr),
}

t.Log("[INFO] Executed KubeArmor")
7 changes: 3 additions & 4 deletions KubeArmor/monitor/processTree.go
@@ -5,7 +5,6 @@ package monitor

import (
"os"
"path/filepath"
"strconv"
"strings"
"sync"
@@ -232,7 +231,7 @@ func (mon *SystemMonitor) GetParentExecPath(containerID string, ctx SyscallConte

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPPID), 10), "/exe")); err == nil && data != "" && data != "/" {
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
// // Store it in the ActiveHostPidMap so we don't need to read procfs again
// // We don't call BuildPidNode Here cause that will put this into a cyclic function call loop
// if pidMap, ok := ActiveHostPidMap[containerID]; ok {
@@ -277,7 +276,7 @@ func (mon *SystemMonitor) GetExecPath(containerID string, ctx SyscallContext, re

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 10), "/exe")); err == nil && data != "" && data != "/" {
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
// // Store it in the ActiveHostPidMap so we don't need to read procfs again
// if pidMap, ok := ActiveHostPidMap[containerID]; ok {
// if node, ok := pidMap[ctx.HostPID]; ok {
@@ -319,7 +318,7 @@ func (mon *SystemMonitor) GetCommand(containerID string, ctx SyscallContext, rea

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 10), "/exe")); err == nil && data != "" && data != "/" {
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
return data
} else if err != nil {
mon.Logger.Debugf("Could not read path from procfs due to %s", err.Error())
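All three restored call sites share the same fallback: when the event did not carry a usable path, readlink on /proc/<pid>/exe recovers the full binary path, with guards against empty or "/" results. A minimal sketch of that fallback:

// Sketch of the /proc/<pid>/exe fallback used to resolve executable paths.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func execPathFromProc(hostPID uint32) (string, bool) {
	data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(hostPID), 10) + "/exe")
	if err != nil || data == "" || data == "/" {
		return "", false
	}
	return data, true
}

func main() {
	if path, ok := execPathFromProc(1); ok {
		fmt.Println("PID 1 executable:", path)
	} else {
		fmt.Println("could not resolve executable path from procfs")
	}
}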
2 changes: 1 addition & 1 deletion deployments/get/objects.go
@@ -264,7 +264,6 @@ func GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet {
var terminationGracePeriodSeconds = int64(60)
var args = []string{
"-gRPC=" + strconv.Itoa(int(port)),
"-procfsMount=/host/procfs",
}

var containerVolumeMounts = []corev1.VolumeMount{
@@ -382,6 +381,7 @@ func GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet {
Operator: "Exists",
},
},
HostPID: true,
HostNetwork: true,
RestartPolicy: "Always",
DNSPolicy: "ClusterFirstWithHostNet",
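With the dedicated procfs volume and -procfsMount argument gone, the generated DaemonSet appears to lean on host namespaces instead: HostPID is set alongside the existing HostNetwork, so the agent sees host processes under the default /proc. A small sketch of the relevant pod-level fields using k8s.io/api/core/v1 (the surrounding DaemonSet wiring is omitted; the combination of values shown is an assumption drawn from this diff, not a quote of the generator):

// Sketch of the pod-level settings relevant to this change.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	podSpec := corev1.PodSpec{
		HostPID:       true, // share the host PID namespace; host processes appear in /proc
		HostNetwork:   true,
		RestartPolicy: corev1.RestartPolicyAlways,
		DNSPolicy:     corev1.DNSClusterFirstWithHostNet,
	}
	fmt.Printf("%+v\n", podSpec)
}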
14 changes: 0 additions & 14 deletions pkg/KubeArmorOperator/common/defaults.go
@@ -237,27 +237,13 @@ var CommonVolumes = []corev1.Volume{
},
},
},
{
Name: "proc-fs-mount",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/proc",
Type: &HostPathDirectory,
},
},
},
}

var CommonVolumesMount = []corev1.VolumeMount{
{
Name: "sys-kernel-debug-path",
MountPath: "/sys/kernel/debug",
},
{
Name: "proc-fs-mount",
MountPath: "/host/procfs",
ReadOnly: true,
},
}

var KubeArmorCaVolume = []corev1.Volume{
Expand Down