Feature/policy rules #399

Merged: 22 commits, Nov 18, 2024
7 changes: 6 additions & 1 deletion .github/workflows/component-tests.yaml
@@ -52,6 +52,7 @@ jobs:
Test_11_EndpointTest,
Test_12_MergingProfilesTest,
Test_13_MergingNetworkNeighborhoodTest,
Test_14_RulePoliciesTest,
]
steps:
- name: Checkout code
@@ -97,9 +98,13 @@ jobs:
- name: Run test
run: |
cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component
- name: Print storage logs
- name: Print node agent & storage logs
if: always()
run: |
echo "Node agent logs"
kubectl logs $(kubectl get pods -n kubescape -o name | grep node-agent) -n kubescape -c node-agent
echo "-----------------------------------------"
echo "Storage logs"
kubectl logs $(kubectl get pods -n kubescape -o name | grep storage) -n kubescape

# - name: Upload plot images
2 changes: 1 addition & 1 deletion go.mod
@@ -23,7 +23,7 @@ require (
github.com/kubescape/backend v0.0.20
github.com/kubescape/go-logger v0.0.23
github.com/kubescape/k8s-interface v0.0.170
github.com/kubescape/storage v0.0.119
github.com/kubescape/storage v0.0.132
github.com/panjf2000/ants/v2 v2.9.1
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.20.4
4 changes: 2 additions & 2 deletions go.sum
@@ -553,8 +553,8 @@ github.com/kubescape/go-logger v0.0.23 h1:5xh+Nm8eGImhFbtippRKLaFgsvlKE1ufvQhNM2
github.com/kubescape/go-logger v0.0.23/go.mod h1:Ayg7g769c7sXVB+P3fkJmbsJpoEmMmaUf9jeo+XuC3U=
github.com/kubescape/k8s-interface v0.0.170 h1:EtzomWoeeIWDz7QrAEsqUDpLHQwoh2m3tZITfrE/tiE=
github.com/kubescape/k8s-interface v0.0.170/go.mod h1:VoEoHI4Va08NiGAkYzbITF50aFMT5y4fPHRb4x2LtME=
github.com/kubescape/storage v0.0.119 h1:7qCSxMRfuCG35H3o832q69hBA06KKHyyLVW76nFy5YA=
github.com/kubescape/storage v0.0.119/go.mod h1:DAR1CmSDhRRBK26nNU4MrVpRAst5nN7IuPuvcnw9XeI=
github.com/kubescape/storage v0.0.132 h1:OmZ/thFrh0n29yvYYTce6aoVfpgSDi5k7rwtFHHGAoA=
github.com/kubescape/storage v0.0.132/go.mod h1:0MIrMh9DVEPmT1+d7siysH6TX+8fTjXIIedoot/6klI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
2 changes: 1 addition & 1 deletion main.go
@@ -16,7 +16,7 @@ import (
cloudmetadata "github.com/kubescape/node-agent/pkg/cloudmetadata"
"github.com/kubescape/node-agent/pkg/config"
"github.com/kubescape/node-agent/pkg/containerwatcher/v1"
"github.com/kubescape/node-agent/pkg/dnsmanager"
"github.com/kubescape/node-agent/pkg/eventreporters/dnsmanager"
"github.com/kubescape/node-agent/pkg/exporters"
"github.com/kubescape/node-agent/pkg/filehandler/v1"
"github.com/kubescape/node-agent/pkg/healthmanager"
@@ -12,6 +12,7 @@ type ApplicationProfileManagerClient interface {
ReportFileExec(k8sContainerID, path string, args []string)
ReportFileOpen(k8sContainerID, path string, flags []string)
ReportHTTPEvent(k8sContainerID string, event *tracerhttptype.Event)
ReportRulePolicy(k8sContainerID, ruleId, allowedProcess string, allowedContainer bool)
ReportDroppedEvent(k8sContainerID string)
ContainerReachedMaxTime(containerID string)
}
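
As a rough illustration of the new interface method, a caller (for example, a rule evaluator that decides a process should be whitelisted) might report a policy as sketched below. The package path, container key, rule ID, and process path are assumptions made for the example, not values taken from this PR:

```go
package example

// Sketch only: the import path and the concrete values below are assumptions
// for illustration; only the ReportRulePolicy signature comes from the diff.
import (
	applicationprofilemanager "github.com/kubescape/node-agent/pkg/applicationprofilemanager"
)

func reportExampleRulePolicy(am applicationprofilemanager.ApplicationProfileManagerClient) {
	k8sContainerID := "default/nginx-7c9f/nginx" // hypothetical namespace/pod/container key

	// Whitelist a single process for a hypothetical rule ID.
	am.ReportRulePolicy(k8sContainerID, "R0001", "/usr/sbin/nginx", false)

	// Or mark the whole container as allowed for that rule, with no specific process.
	am.ReportRulePolicy(k8sContainerID, "R0001", "", true)
}
```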
@@ -42,6 +42,10 @@ func (a ApplicationProfileManagerMock) ReportHTTPEvent(_ string, _ *tracerhttpty
// noop
}

func (a ApplicationProfileManagerMock) ReportRulePolicy(_, _, _ string, _ bool) {
// noop
}

func (a ApplicationProfileManagerMock) ContainerReachedMaxTime(_ string) {
// noop
}
110 changes: 98 additions & 12 deletions pkg/applicationprofilemanager/v1/applicationprofile_manager.go
@@ -6,6 +6,7 @@ import (
"fmt"
"regexp"
"runtime"
"slices"
"strings"
"time"

@@ -53,10 +54,12 @@ type ApplicationProfileManager struct {
savedExecs maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
savedOpens maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
savedSyscalls maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID
savedRulePolicies maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
toSaveCapabilities maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID
toSaveEndpoints maps.SafeMap[string, *maps.SafeMap[string, *v1beta1.HTTPEndpoint]] // key is k8sContainerID
toSaveExecs maps.SafeMap[string, *maps.SafeMap[string, []string]] // key is k8sContainerID
toSaveOpens maps.SafeMap[string, *maps.SafeMap[string, mapset.Set[string]]] // key is k8sContainerID
toSaveRulePolicies maps.SafeMap[string, *maps.SafeMap[string, *v1beta1.RulePolicy]] // key is k8sContainerID
watchedContainerChannels maps.SafeMap[string, chan error] // key is ContainerID
k8sClient k8sclient.K8sClientInterface
k8sObjectCache objectcache.K8sObjectCache
@@ -146,10 +149,12 @@ func (am *ApplicationProfileManager) deleteResources(watchedContainer *utils.Wat
am.savedExecs.Delete(watchedContainer.K8sContainerID)
am.savedOpens.Delete(watchedContainer.K8sContainerID)
am.savedSyscalls.Delete(watchedContainer.K8sContainerID)
am.savedRulePolicies.Delete(watchedContainer.K8sContainerID)
am.toSaveCapabilities.Delete(watchedContainer.K8sContainerID)
am.toSaveEndpoints.Delete(watchedContainer.K8sContainerID)
am.toSaveExecs.Delete(watchedContainer.K8sContainerID)
am.toSaveOpens.Delete(watchedContainer.K8sContainerID)
am.toSaveRulePolicies.Delete(watchedContainer.K8sContainerID)
am.watchedContainerChannels.Delete(watchedContainer.ContainerID)
}

@@ -173,7 +178,8 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta
watchedContainer.SetCompletionStatus(utils.WatchedContainerCompletionStatusFull)
}
watchedContainer.SetStatus(utils.WatchedContainerStatusInitializing)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)

initOps := GetInitOperations(watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)

for {
select {
@@ -184,20 +190,26 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta
watchedContainer.UpdateDataTicker.Reset(utils.AddJitter(am.cfg.UpdateDataPeriod, am.cfg.MaxJitterPercentage))
}
watchedContainer.SetStatus(utils.WatchedContainerStatusReady)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)

// save profile after initialization
if initOps != nil {
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, initOps)
initOps = nil
}

case err := <-watchedContainer.SyncChannel:
switch {
case errors.Is(err, utils.ContainerHasTerminatedError):
// if exit code is 0 we set the status to completed
if objectcache.GetTerminationExitCode(am.k8sObjectCache, container.K8s.Namespace, container.K8s.PodName, container.K8s.ContainerName, container.Runtime.ContainerID) == 0 {
watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
}

am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)
return err
case errors.Is(err, utils.ContainerReachedMaxTime):
watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)
return err
case errors.Is(err, utils.ObjectCompleted):
watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
@@ -211,7 +223,7 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta
}
}

func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedContainer *utils.WatchedContainerData, namespace string) {
func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedContainer *utils.WatchedContainerData, namespace string, initalizeOperations []utils.PatchOperation) {
Review comment (Contributor): this is a bit ugly, but let's leave it like that for the moment
ctx, span := otel.Tracer("").Start(ctx, "ApplicationProfileManager.saveProfile")
defer span.End()

@@ -314,6 +326,18 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
opens[path].Append(open.ToSlice()...)
return true
})

// get rule policies
rulePolicies := make(map[string]v1beta1.RulePolicy)
toSaveRulePolicies := am.toSaveRulePolicies.Get(watchedContainer.K8sContainerID)
// point IG to a new rule policies map
am.toSaveRulePolicies.Set(watchedContainer.K8sContainerID, new(maps.SafeMap[string, *v1beta1.RulePolicy]))
// prepare rule policies map
toSaveRulePolicies.Range(func(ruleIdentifier string, rulePolicy *v1beta1.RulePolicy) bool {
rulePolicies[ruleIdentifier] = *rulePolicy
return true
})

// new activity
// the process tries to use JSON patching to avoid conflicts between updates on the same object from different containers
// 0. create both a patch and a new object
@@ -323,9 +347,13 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
// 3a. the object is missing its container slice - ADD one with the container profile at the right index
// 3b. the object is missing the container profile - ADD the container profile at the right index
// 3c. default - patch the container ourselves and REPLACE it at the right index
if len(capabilities) > 0 || len(endpoints) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || watchedContainer.StatusUpdated() {
if len(capabilities) > 0 || len(endpoints) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || len(initalizeOperations) > 0 || watchedContainer.StatusUpdated() {
// 0. calculate patch
operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, endpoints, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)
operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)
if len(initalizeOperations) > 0 {
operations = append(operations, initalizeOperations...)
}

operations = utils.AppendStatusAnnotationPatchOperations(operations, watchedContainer)
operations = append(operations, utils.PatchOperation{
Op: "add",
@@ -366,6 +394,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
SeccompProfile: seccompProfile,
})
}
@@ -377,7 +406,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
newObject.Spec.EphemeralContainers = addContainers(newObject.Spec.EphemeralContainers, watchedContainer.ContainerNames[utils.EphemeralContainer])
// enrich container
newContainer := utils.GetApplicationProfileContainer(newObject, watchedContainer.ContainerType, watchedContainer.ContainerIndex)
utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints)
utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies)
// try to create object
if err := am.storageClient.CreateApplicationProfile(newObject, namespace); err != nil {
gotErr = err
@@ -425,11 +454,12 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
SeccompProfile: seccompProfile,
}
}
// update it
utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens, endpoints)
utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies)
// get existing containers
var existingContainers []v1beta1.ApplicationProfileContainer
if watchedContainer.ContainerType == utils.Container {
@@ -469,6 +499,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
SeccompProfile: seccompProfile,
},
})
@@ -558,11 +589,22 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
}
return true
})

// record saved rule policies
toSaveRulePolicies.Range(func(ruleIdentifier string, rulePolicy *v1beta1.RulePolicy) bool {
if !am.toSaveRulePolicies.Get(watchedContainer.K8sContainerID).Has(ruleIdentifier) {
am.savedRulePolicies.Get(watchedContainer.K8sContainerID).Set(ruleIdentifier, rulePolicy)
}
return true
})

logger.L().Debug("ApplicationProfileManager - saved application profile",
helpers.Int("capabilities", len(capabilities)),
helpers.Int("endpoints", toSaveEndpoints.Len()),
helpers.Int("execs", toSaveExecs.Len()),
helpers.Int("opens", toSaveOpens.Len()),
helpers.Int("rule policies", toSaveRulePolicies.Len()),
helpers.Int("init operations", len(initalizeOperations)),
helpers.String("slug", slug),
helpers.Int("container index", watchedContainer.ContainerIndex),
helpers.String("container ID", watchedContainer.ContainerID),
@@ -638,10 +680,12 @@ func (am *ApplicationProfileManager) ContainerCallback(notif containercollection
am.savedExecs.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
am.savedOpens.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
am.savedSyscalls.Set(k8sContainerID, mapset.NewSet[string]())
am.savedRulePolicies.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
am.toSaveCapabilities.Set(k8sContainerID, mapset.NewSet[string]())
am.toSaveEndpoints.Set(k8sContainerID, new(maps.SafeMap[string, *v1beta1.HTTPEndpoint]))
am.toSaveExecs.Set(k8sContainerID, new(maps.SafeMap[string, []string]))
am.toSaveOpens.Set(k8sContainerID, new(maps.SafeMap[string, mapset.Set[string]]))
am.toSaveRulePolicies.Set(k8sContainerID, new(maps.SafeMap[string, *v1beta1.RulePolicy]))
am.removedContainers.Remove(k8sContainerID) // make sure container is not in the removed list
am.trackedContainers.Add(k8sContainerID)
go am.startApplicationProfiling(ctx, notif.Container, k8sContainerID)
@@ -718,8 +762,8 @@ func (am *ApplicationProfileManager) ReportHTTPEvent(k8sContainerID string, even
if err := am.waitForContainer(k8sContainerID); err != nil {
return
}
// get endpoint from event
endpointIdentifier, err := am.GetEndpointIdentifier(event)

endpointIdentifier, err := GetEndpointIdentifier(event)
if err != nil {
logger.L().Ctx(am.ctx).Warning("ApplicationProfileManager - failed to get endpoint identifier", helpers.Error(err))
return
@@ -737,3 +781,45 @@ func (am *ApplicationProfileManager) ReportHTTPEvent(k8sContainerID string, even
// add to endpoint map
am.toSaveEndpoints.Get(k8sContainerID).Set(endpointHash, endpoint)
}

func (am *ApplicationProfileManager) ReportRulePolicy(k8sContainerID, ruleId, allowedProcess string, allowedContainer bool) {
if err := am.waitForContainer(k8sContainerID); err != nil {
return
}

newPolicy := &v1beta1.RulePolicy{
AllowedContainer: allowedContainer,
AllowedProcesses: []string{allowedProcess},
}

savedPolicies := am.savedRulePolicies.Get(k8sContainerID)
savedPolicy, ok := savedPolicies.Get(ruleId)
if ok {
savedPolicy := savedPolicy.(*v1beta1.RulePolicy)
if IsPolicyIncluded(savedPolicy, newPolicy) {
return
}
}

toBeSavedPolicies := am.toSaveRulePolicies.Get(k8sContainerID)
toBeSavedPolicy := toBeSavedPolicies.Get(ruleId)

if IsPolicyIncluded(toBeSavedPolicy, newPolicy) {
return
}

var finalPolicy *v1beta1.RulePolicy
if toBeSavedPolicy != nil {
finalPolicy = toBeSavedPolicy
if allowedContainer {
finalPolicy.AllowedContainer = true
}
if allowedProcess != "" && !slices.Contains(finalPolicy.AllowedProcesses, allowedProcess) {
finalPolicy.AllowedProcesses = append(finalPolicy.AllowedProcesses, allowedProcess)
}
} else {
finalPolicy = newPolicy
}

toBeSavedPolicies.Set(ruleId, finalPolicy)
}
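
IsPolicyIncluded itself is not part of this diff; a minimal sketch of the containment check that the merging logic above appears to rely on could look like the following. The semantics are assumed from the surrounding code, not taken from the PR:

```go
package applicationprofilemanager

import (
	"slices"

	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
)

// Assumed semantics only: a new policy adds nothing when the existing policy
// already allows the whole container, or already lists every requested process
// and the new policy does not ask for container-wide permission.
func IsPolicyIncluded(existing, newPolicy *v1beta1.RulePolicy) bool {
	if existing == nil {
		return false
	}
	if existing.AllowedContainer {
		return true
	}
	if newPolicy.AllowedContainer {
		return false
	}
	for _, process := range newPolicy.AllowedProcesses {
		if !slices.Contains(existing.AllowedProcesses, process) {
			return false
		}
	}
	return true
}
```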