
Commit 0ddbc3f

Hardlink created over sensitive file
2 parents: 8a08291 + 4fb52bf

Some content is hidden: large commits have part of their content hidden by default, so not all of the 41 changed files appear below.

41 files changed: 1276 additions, 358 deletions

.github/workflows/component-tests.yaml

Lines changed: 6 additions & 1 deletion
@@ -52,6 +52,7 @@ jobs:
           Test_11_EndpointTest,
           Test_12_MergingProfilesTest,
           Test_13_MergingNetworkNeighborhoodTest,
+          Test_14_RulePoliciesTest,
           ]
     steps:
       - name: Checkout code
@@ -97,9 +98,13 @@ jobs:
       - name: Run test
         run: |
           cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component
-      - name: Print storage logs
+      - name: Print node agent & storage logs
         if: always()
         run: |
+          echo "Node agent logs"
+          kubectl logs $(kubectl get pods -n kubescape -o name | grep node-agent) -n kubescape -c node-agent
+          echo "-----------------------------------------"
+          echo "Storage logs"
           kubectl logs $(kubectl get pods -n kubescape -o name | grep storage) -n kubescape

       # - name: Upload plot images
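
The new matrix entry reuses the existing "Run test" step, so the same test can be run locally once the matrix value is substituted, assuming a cluster with the Kubescape node agent and storage already installed:

cd tests && go test -v ./... -run Test_14_RulePoliciesTest --timeout=20m --tags=component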

go.mod

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ require (
     github.com/kubescape/backend v0.0.20
     github.com/kubescape/go-logger v0.0.23
     github.com/kubescape/k8s-interface v0.0.170
-    github.com/kubescape/storage v0.0.119
+    github.com/kubescape/storage v0.0.132
     github.com/panjf2000/ants/v2 v2.9.1
     github.com/prometheus/alertmanager v0.27.0
     github.com/prometheus/client_golang v1.20.5

go.sum

Lines changed: 2 additions & 2 deletions
@@ -551,8 +551,8 @@ github.com/kubescape/go-logger v0.0.23 h1:5xh+Nm8eGImhFbtippRKLaFgsvlKE1ufvQhNM2
 github.com/kubescape/go-logger v0.0.23/go.mod h1:Ayg7g769c7sXVB+P3fkJmbsJpoEmMmaUf9jeo+XuC3U=
 github.com/kubescape/k8s-interface v0.0.170 h1:EtzomWoeeIWDz7QrAEsqUDpLHQwoh2m3tZITfrE/tiE=
 github.com/kubescape/k8s-interface v0.0.170/go.mod h1:VoEoHI4Va08NiGAkYzbITF50aFMT5y4fPHRb4x2LtME=
-github.com/kubescape/storage v0.0.119 h1:7qCSxMRfuCG35H3o832q69hBA06KKHyyLVW76nFy5YA=
-github.com/kubescape/storage v0.0.119/go.mod h1:DAR1CmSDhRRBK26nNU4MrVpRAst5nN7IuPuvcnw9XeI=
+github.com/kubescape/storage v0.0.132 h1:OmZ/thFrh0n29yvYYTce6aoVfpgSDi5k7rwtFHHGAoA=
+github.com/kubescape/storage v0.0.132/go.mod h1:0MIrMh9DVEPmT1+d7siysH6TX+8fTjXIIedoot/6klI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=

main.go

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ import (
     cloudmetadata "github.com/kubescape/node-agent/pkg/cloudmetadata"
     "github.com/kubescape/node-agent/pkg/config"
     "github.com/kubescape/node-agent/pkg/containerwatcher/v1"
-    "github.com/kubescape/node-agent/pkg/dnsmanager"
+    "github.com/kubescape/node-agent/pkg/eventreporters/dnsmanager"
     "github.com/kubescape/node-agent/pkg/exporters"
     "github.com/kubescape/node-agent/pkg/filehandler/v1"
     "github.com/kubescape/node-agent/pkg/healthmanager"

pkg/applicationprofilemanager/applicationprofile_manager_interface.go

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@ type ApplicationProfileManagerClient interface {
     ReportFileExec(k8sContainerID, path string, args []string)
     ReportFileOpen(k8sContainerID, path string, flags []string)
     ReportHTTPEvent(k8sContainerID string, event *tracerhttptype.Event)
+    ReportRulePolicy(k8sContainerID, ruleId, allowedProcess string, allowedContainer bool)
     ReportDroppedEvent(k8sContainerID string)
     ContainerReachedMaxTime(containerID string)
 }

pkg/applicationprofilemanager/applicationprofile_manager_mock.go

Lines changed: 4 additions & 0 deletions
@@ -42,6 +42,10 @@ func (a ApplicationProfileManagerMock) ReportHTTPEvent(_ string, _ *tracerhttpty
     // noop
 }

+func (a ApplicationProfileManagerMock) ReportRulePolicy(_, _, _ string, _ bool) {
+    // noop
+}
+
 func (a ApplicationProfileManagerMock) ContainerReachedMaxTime(_ string) {
     // noop
 }
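
Because the mock and the interface live in the same Go package, a compile-time assertion is the usual way to catch a missing method like this before the component tests run; a sketch, assuming the value receivers shown above (whether the repository already carries such an assertion is not visible in this commit):

// Fails to compile if the mock ever falls behind the interface.
var _ ApplicationProfileManagerClient = ApplicationProfileManagerMock{}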

pkg/applicationprofilemanager/v1/applicationprofile_manager.go

Lines changed: 98 additions & 12 deletions
@@ -6,6 +6,7 @@ import (
     "fmt"
     "regexp"
     "runtime"
+    "slices"
     "strings"
     "time"

@@ -53,10 +54,12 @@ type ApplicationProfileManager struct {
     savedExecs maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
     savedOpens maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
     savedSyscalls maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID
+    savedRulePolicies maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID
     toSaveCapabilities maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID
     toSaveEndpoints maps.SafeMap[string, *maps.SafeMap[string, *v1beta1.HTTPEndpoint]] // key is k8sContainerID
     toSaveExecs maps.SafeMap[string, *maps.SafeMap[string, []string]] // key is k8sContainerID
     toSaveOpens maps.SafeMap[string, *maps.SafeMap[string, mapset.Set[string]]] // key is k8sContainerID
+    toSaveRulePolicies maps.SafeMap[string, *maps.SafeMap[string, *v1beta1.RulePolicy]] // key is k8sContainerID
     watchedContainerChannels maps.SafeMap[string, chan error] // key is ContainerID
     k8sClient k8sclient.K8sClientInterface
     k8sObjectCache objectcache.K8sObjectCache
@@ -146,10 +149,12 @@ func (am *ApplicationProfileManager) deleteResources(watchedContainer *utils.Wat
     am.savedExecs.Delete(watchedContainer.K8sContainerID)
     am.savedOpens.Delete(watchedContainer.K8sContainerID)
     am.savedSyscalls.Delete(watchedContainer.K8sContainerID)
+    am.savedRulePolicies.Delete(watchedContainer.K8sContainerID)
     am.toSaveCapabilities.Delete(watchedContainer.K8sContainerID)
     am.toSaveEndpoints.Delete(watchedContainer.K8sContainerID)
     am.toSaveExecs.Delete(watchedContainer.K8sContainerID)
     am.toSaveOpens.Delete(watchedContainer.K8sContainerID)
+    am.toSaveRulePolicies.Delete(watchedContainer.K8sContainerID)
     am.watchedContainerChannels.Delete(watchedContainer.ContainerID)
 }

@@ -173,7 +178,8 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta
         watchedContainer.SetCompletionStatus(utils.WatchedContainerCompletionStatusFull)
     }
     watchedContainer.SetStatus(utils.WatchedContainerStatusInitializing)
-    am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
+
+    initOps := GetInitOperations(watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)

     for {
         select {
@@ -184,20 +190,26 @@
                 watchedContainer.UpdateDataTicker.Reset(utils.AddJitter(am.cfg.UpdateDataPeriod, am.cfg.MaxJitterPercentage))
             }
             watchedContainer.SetStatus(utils.WatchedContainerStatusReady)
-            am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
+            am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)
+
+            // save profile after initialaztion
+            if initOps != nil {
+                am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, initOps)
+                initOps = nil
+            }
+
         case err := <-watchedContainer.SyncChannel:
             switch {
             case errors.Is(err, utils.ContainerHasTerminatedError):
                 // if exit code is 0 we set the status to completed
                 if objectcache.GetTerminationExitCode(am.k8sObjectCache, container.K8s.Namespace, container.K8s.PodName, container.K8s.ContainerName, container.Runtime.ContainerID) == 0 {
                     watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
                 }
-
-                am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
+                am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)
                 return err
             case errors.Is(err, utils.ContainerReachedMaxTime):
                 watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
-                am.saveProfile(ctx, watchedContainer, container.K8s.Namespace)
+                am.saveProfile(ctx, watchedContainer, container.K8s.Namespace, nil)
                 return err
             case errors.Is(err, utils.ObjectCompleted):
                 watchedContainer.SetStatus(utils.WatchedContainerStatusCompleted)
@@ -211,7 +223,7 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta
     }
 }

-func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedContainer *utils.WatchedContainerData, namespace string) {
+func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedContainer *utils.WatchedContainerData, namespace string, initalizeOperations []utils.PatchOperation) {
     ctx, span := otel.Tracer("").Start(ctx, "ApplicationProfileManager.saveProfile")
     defer span.End()

@@ -314,6 +326,18 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
         opens[path].Append(open.ToSlice()...)
         return true
     })
+
+    // get rule policies
+    rulePolicies := make(map[string]v1beta1.RulePolicy)
+    toSaveRulePolicies := am.toSaveRulePolicies.Get(watchedContainer.K8sContainerID)
+    // point IG to a new rule policies map
+    am.toSaveRulePolicies.Set(watchedContainer.K8sContainerID, new(maps.SafeMap[string, *v1beta1.RulePolicy]))
+    // prepare rule policies map
+    toSaveRulePolicies.Range(func(ruleIdentifier string, rulePolicy *v1beta1.RulePolicy) bool {
+        rulePolicies[ruleIdentifier] = *rulePolicy
+        return true
+    })
+
     // new activity
     // the process tries to use JSON patching to avoid conflicts between updates on the same object from different containers
     // 0. create both a patch and a new object
@@ -323,9 +347,13 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
     // 3a. the object is missing its container slice - ADD one with the container profile at the right index
     // 3b. the object is missing the container profile - ADD the container profile at the right index
     // 3c. default - patch the container ourselves and REPLACE it at the right index
-    if len(capabilities) > 0 || len(endpoints) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || watchedContainer.StatusUpdated() {
+    if len(capabilities) > 0 || len(endpoints) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || len(initalizeOperations) > 0 || watchedContainer.StatusUpdated() {
         // 0. calculate patch
-        operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, endpoints, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)
+        operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex)
+        if len(initalizeOperations) > 0 {
+            operations = append(operations, initalizeOperations...)
+        }
+
         operations = utils.AppendStatusAnnotationPatchOperations(operations, watchedContainer)
         operations = append(operations, utils.PatchOperation{
             Op: "add",
@@ -366,6 +394,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
                     Opens: make([]v1beta1.OpenCalls, 0),
                     Capabilities: make([]string, 0),
                     Syscalls: make([]string, 0),
+                    PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
                     SeccompProfile: seccompProfile,
                 })
             }
@@ -377,7 +406,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
             newObject.Spec.EphemeralContainers = addContainers(newObject.Spec.EphemeralContainers, watchedContainer.ContainerNames[utils.EphemeralContainer])
             // enrich container
             newContainer := utils.GetApplicationProfileContainer(newObject, watchedContainer.ContainerType, watchedContainer.ContainerIndex)
-            utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints)
+            utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies)
             // try to create object
             if err := am.storageClient.CreateApplicationProfile(newObject, namespace); err != nil {
                 gotErr = err
@@ -425,11 +454,12 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
                     Opens: make([]v1beta1.OpenCalls, 0),
                     Capabilities: make([]string, 0),
                     Syscalls: make([]string, 0),
+                    PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
                     SeccompProfile: seccompProfile,
                 }
             }
             // update it
-            utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens, endpoints)
+            utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens, endpoints, rulePolicies)
             // get existing containers
             var existingContainers []v1beta1.ApplicationProfileContainer
             if watchedContainer.ContainerType == utils.Container {
@@ -469,6 +499,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
                     Opens: make([]v1beta1.OpenCalls, 0),
                     Capabilities: make([]string, 0),
                     Syscalls: make([]string, 0),
+                    PolicyByRuleId: make(map[string]v1beta1.RulePolicy),
                     SeccompProfile: seccompProfile,
                 },
             })
@@ -558,11 +589,22 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
         }
         return true
     })
+
+    // record saved rule policies
+    toSaveRulePolicies.Range(func(ruleIdentifier string, rulePolicy *v1beta1.RulePolicy) bool {
+        if !am.toSaveRulePolicies.Get(watchedContainer.K8sContainerID).Has(ruleIdentifier) {
+            am.savedRulePolicies.Get(watchedContainer.K8sContainerID).Set(ruleIdentifier, rulePolicy)
+        }
+        return true
+    })
+
     logger.L().Debug("ApplicationProfileManager - saved application profile",
         helpers.Int("capabilities", len(capabilities)),
         helpers.Int("endpoints", toSaveEndpoints.Len()),
         helpers.Int("execs", toSaveExecs.Len()),
         helpers.Int("opens", toSaveOpens.Len()),
+        helpers.Int("rule policies", toSaveRulePolicies.Len()),
+        helpers.Int("init operations", len(initalizeOperations)),
         helpers.String("slug", slug),
         helpers.Int("container index", watchedContainer.ContainerIndex),
         helpers.String("container ID", watchedContainer.ContainerID),
@@ -638,10 +680,12 @@ func (am *ApplicationProfileManager) ContainerCallback(notif containercollection
         am.savedExecs.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
         am.savedOpens.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
         am.savedSyscalls.Set(k8sContainerID, mapset.NewSet[string]())
+        am.savedRulePolicies.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod))
        am.toSaveCapabilities.Set(k8sContainerID, mapset.NewSet[string]())
         am.toSaveEndpoints.Set(k8sContainerID, new(maps.SafeMap[string, *v1beta1.HTTPEndpoint]))
         am.toSaveExecs.Set(k8sContainerID, new(maps.SafeMap[string, []string]))
         am.toSaveOpens.Set(k8sContainerID, new(maps.SafeMap[string, mapset.Set[string]]))
+        am.toSaveRulePolicies.Set(k8sContainerID, new(maps.SafeMap[string, *v1beta1.RulePolicy]))
         am.removedContainers.Remove(k8sContainerID) // make sure container is not in the removed list
         am.trackedContainers.Add(k8sContainerID)
         go am.startApplicationProfiling(ctx, notif.Container, k8sContainerID)
@@ -718,8 +762,8 @@ func (am *ApplicationProfileManager) ReportHTTPEvent(k8sContainerID string, even
     if err := am.waitForContainer(k8sContainerID); err != nil {
         return
     }
-    // get endpoint from event
-    endpointIdentifier, err := am.GetEndpointIdentifier(event)
+
+    endpointIdentifier, err := GetEndpointIdentifier(event)
     if err != nil {
         logger.L().Ctx(am.ctx).Warning("ApplicationProfileManager - failed to get endpoint identifier", helpers.Error(err))
         return
@@ -737,3 +781,45 @@ func (am *ApplicationProfileManager) ReportHTTPEvent(k8sContainerID string, even
     // add to endpoint map
     am.toSaveEndpoints.Get(k8sContainerID).Set(endpointHash, endpoint)
 }
+
+func (am *ApplicationProfileManager) ReportRulePolicy(k8sContainerID, ruleId, allowedProcess string, allowedContainer bool) {
+    if err := am.waitForContainer(k8sContainerID); err != nil {
+        return
+    }
+
+    newPolicy := &v1beta1.RulePolicy{
+        AllowedContainer: allowedContainer,
+        AllowedProcesses: []string{allowedProcess},
+    }
+
+    savedPolicies := am.savedRulePolicies.Get(k8sContainerID)
+    savedPolicy, ok := savedPolicies.Get(ruleId)
+    if ok {
+        savedPolicy := savedPolicy.(*v1beta1.RulePolicy)
+        if IsPolicyIncluded(savedPolicy, newPolicy) {
+            return
+        }
+    }
+
+    toBeSavedPolicies := am.toSaveRulePolicies.Get(k8sContainerID)
+    toBeSavedPolicy := toBeSavedPolicies.Get(ruleId)
+
+    if IsPolicyIncluded(toBeSavedPolicy, newPolicy) {
+        return
+    }
+
+    var finalPolicy *v1beta1.RulePolicy
+    if toBeSavedPolicy != nil {
+        finalPolicy = toBeSavedPolicy
+        if allowedContainer {
+            finalPolicy.AllowedContainer = true
+        }
+        if allowedProcess != "" && !slices.Contains(finalPolicy.AllowedProcesses, allowedProcess) {
+            finalPolicy.AllowedProcesses = append(finalPolicy.AllowedProcesses, allowedProcess)
+        }
+    } else {
+        finalPolicy = newPolicy
+    }
+
+    toBeSavedPolicies.Set(ruleId, finalPolicy)
+}
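
To make the accumulation in ReportRulePolicy concrete, here is a self-contained sketch of the same merge rules, using only the RulePolicy fields this diff touches (AllowedContainer, AllowedProcesses). The real method additionally consults IsPolicyIncluded and the per-container saved/to-save caches, which are not reproduced here.

package main

import (
    "fmt"
    "slices"
)

// RulePolicy mirrors the two fields this commit reads and writes on v1beta1.RulePolicy.
type RulePolicy struct {
    AllowedContainer bool
    AllowedProcesses []string
}

// merge folds one reported (process, allowedContainer) pair into an existing policy the
// same way ReportRulePolicy does: OR the container flag, append processes not seen yet.
func merge(existing *RulePolicy, allowedProcess string, allowedContainer bool) *RulePolicy {
    if existing == nil {
        return &RulePolicy{AllowedContainer: allowedContainer, AllowedProcesses: []string{allowedProcess}}
    }
    if allowedContainer {
        existing.AllowedContainer = true
    }
    if allowedProcess != "" && !slices.Contains(existing.AllowedProcesses, allowedProcess) {
        existing.AllowedProcesses = append(existing.AllowedProcesses, allowedProcess)
    }
    return existing
}

func main() {
    var p *RulePolicy
    p = merge(p, "ln", false) // first report: only "ln" is allowed for this rule
    p = merge(p, "cp", false) // second report: "cp" is appended
    p = merge(p, "ln", true)  // duplicate process is ignored, but the whole container becomes allowed
    fmt.Printf("%+v\n", p)    // &{AllowedContainer:true AllowedProcesses:[ln cp]}
}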
