diff --git a/.gitignore b/.gitignore index 6d9cf638a..6ed11f4e2 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,8 @@ test/integration/components/gohttp2/client/client test/integration/components/gohttp2/server/http2srv beyla.sln test/integration/components/gokafka/gokafka -test/integration/components/gokafka/vendor -test/integration/components/goredis/vendor +test/integration/components/gokafka/vendor/* +test/integration/components/gokafka-seg/vendor/* +test/integration/components/gokafka-seg/gokafka +test/integration/components/goredis/vendor/* test/integration/components/goredis/goredis \ No newline at end of file diff --git a/README.md b/README.md index 13e1d686f..e4cc01fff 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,7 @@ The Go instrumentation is limited to certain specific libraries. | [Go x/net/http2](https://golang.org/x/net/http2) | ✅ | | [Go-Redis v9](github.com/redis/go-redis) | ✅ | | [Sarama Kafka](github.com/IBM/sarama) | ✅ | +| [kafka-Go](https://github.com/segmentio/kafka-go) | ✅ | HTTPS instrumentation is limited to Go programs and libraries/languages using libssl3. diff --git a/bpf/go_kafka_go.c b/bpf/go_kafka_go.c new file mode 100644 index 000000000..f4d7a5391 --- /dev/null +++ b/bpf/go_kafka_go.c @@ -0,0 +1,290 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "utils.h" +#include "bpf_dbg.h" +#include "go_common.h" +#include "ringbuf.h" + +#define KAFKA_API_FETCH 1 +#define KAFKA_API_PRODUCE 0 + +volatile const u64 kafka_go_writer_topic_pos; +volatile const u64 kafka_go_protocol_conn_pos; +volatile const u64 kafka_go_reader_topic_pos; + +typedef struct produce_req { + u64 msg_ptr; + u64 conn_ptr; + u64 start_monotime_ns; +} produce_req_t; + +typedef struct topic { + char name[MAX_TOPIC_NAME_LEN]; + tp_info_t tp; +} topic_t; + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, void *); // w_ptr + __type(value, tp_info_t); // traceparent + __uint(max_entries, MAX_CONCURRENT_REQUESTS); +} produce_traceparents SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, void *); // goroutine + __type(value, topic_t); // topic info + __uint(max_entries, MAX_CONCURRENT_REQUESTS); +} ongoing_produce_topics SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, void *); // msg ptr + __type(value, topic_t); // topic info + __uint(max_entries, MAX_CONCURRENT_REQUESTS); +} ongoing_produce_messages SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, void *); // goroutine + __type(value, produce_req_t); // rw ptr + start time + __uint(max_entries, MAX_CONCURRENT_REQUESTS); +} produce_requests SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, void *); // goroutine + __type(value, kafka_go_req_t); // rw ptr + start time + __uint(max_entries, MAX_CONCURRENT_REQUESTS); +} fetch_requests SEC(".maps"); + +// Code for the produce messages path +SEC("uprobe/writer_write_messages") +int uprobe_writer_write_messages(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + void *w_ptr = (void *)GO_PARAM1(ctx); + bpf_dbg_printk("=== uprobe/kafka-go writer_write_messages %llx w_ptr %llx === ", goroutine_addr, w_ptr); + + tp_info_t tp = {}; + + // We don't look up in the headers, no http/grpc request, 
therefore 0 as last argument + client_trace_parent(goroutine_addr, &tp, 0); + + bpf_map_update_elem(&produce_traceparents, &w_ptr, &tp, BPF_ANY); + return 0; +} + +SEC("uprobe/writer_produce") +int uprobe_writer_produce(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + bpf_dbg_printk("=== uprobe/kafka-go writer_produce %llx === ", goroutine_addr); + + void *w_ptr = (void *)GO_PARAM1(ctx); + + if (w_ptr) { + void *topic_ptr = 0; + bpf_probe_read_user(&topic_ptr, sizeof(void *), w_ptr + kafka_go_writer_topic_pos); + + bpf_dbg_printk("topic_ptr %llx", topic_ptr); + if (topic_ptr) { + topic_t topic = {}; + + tp_info_t *tp = bpf_map_lookup_elem(&produce_traceparents, &w_ptr); + if (tp) { + bpf_dbg_printk("found existing traceparent %llx", tp); + __builtin_memcpy(&topic.tp, tp, sizeof(tp_info_t)); + } else { + urand_bytes(topic.tp.trace_id, TRACE_ID_SIZE_BYTES); + urand_bytes(topic.tp.span_id, SPAN_ID_SIZE_BYTES); + } + + bpf_probe_read_user(&topic.name, sizeof(topic.name), topic_ptr); + bpf_map_update_elem(&ongoing_produce_topics, &goroutine_addr, &topic, BPF_ANY); + } + bpf_map_delete_elem(&produce_traceparents, &w_ptr); + } + + return 0; +} + +SEC("uprobe/client_roundTrip") +int uprobe_client_roundTrip(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + bpf_dbg_printk("=== uprobe/kafka-go client_roundTrip %llx === ", goroutine_addr); + + topic_t *topic_ptr = bpf_map_lookup_elem(&ongoing_produce_topics, &goroutine_addr); + + if (topic_ptr) { + void *msg_ptr = (void *)GO_PARAM7(ctx); + bpf_dbg_printk("msg ptr %llx", msg_ptr); + if (msg_ptr) { + topic_t topic; + __builtin_memcpy(&topic, topic_ptr, sizeof(topic_t)); + bpf_map_update_elem(&ongoing_produce_messages, &msg_ptr, &topic, BPF_ANY); + } + } + + bpf_map_delete_elem(&ongoing_produce_topics, &goroutine_addr); + return 0; +} + +SEC("uprobe/protocol_RoundTrip") +int uprobe_protocol_roundtrip(struct pt_regs *ctx) { + bpf_dbg_printk("=== uprobe/kafka-go 
protocol_RoundTrip === "); + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + void *rw_ptr = (void *)GO_PARAM2(ctx); + void *msg_ptr = (void *)GO_PARAM8(ctx); + bpf_dbg_printk("goroutine_addr %lx, rw ptr %llx, msg_ptr %llx", goroutine_addr, rw_ptr, msg_ptr); + + + if (rw_ptr) { + topic_t *topic_ptr = bpf_map_lookup_elem(&ongoing_produce_messages, &msg_ptr); + bpf_dbg_printk("Found topic %llx", topic_ptr); + if (topic_ptr) { + produce_req_t p = { + .conn_ptr = ((u64)rw_ptr) + kafka_go_protocol_conn_pos, + .msg_ptr = (u64)msg_ptr, + .start_monotime_ns = bpf_ktime_get_ns(), + }; + + bpf_map_update_elem(&produce_requests, &goroutine_addr, &p, BPF_ANY); + } + } + + return 0; +} + +SEC("uprobe/protocol_RoundTrip_ret") +int uprobe_protocol_roundtrip_ret(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + bpf_dbg_printk("=== uprobe/protocol_RoundTrip ret %llx === ", goroutine_addr); + + produce_req_t *p_ptr = bpf_map_lookup_elem(&produce_requests, &goroutine_addr); + + bpf_dbg_printk("p_ptr %llx", p_ptr); + + if (p_ptr) { + void *msg_ptr = (void *)p_ptr->msg_ptr; + topic_t *topic_ptr = bpf_map_lookup_elem(&ongoing_produce_messages, &msg_ptr); + + bpf_dbg_printk("goroutine_addr %lx, conn ptr %llx, msg_ptr = %llx, topic_ptr = %llx", goroutine_addr, p_ptr->conn_ptr, p_ptr->msg_ptr, topic_ptr); + + if (topic_ptr) { + kafka_go_req_t *trace = bpf_ringbuf_reserve(&events, sizeof(kafka_go_req_t), 0); + if (trace) { + trace->type = EVENT_GO_KAFKA_SEG; + trace->op = KAFKA_API_PRODUCE; + trace->start_monotime_ns = p_ptr->start_monotime_ns; + trace->end_monotime_ns = bpf_ktime_get_ns(); + + void *conn_ptr = 0; + bpf_probe_read(&conn_ptr, sizeof(conn_ptr), (void *)(p_ptr->conn_ptr + 8)); // find conn + bpf_dbg_printk("conn ptr %llx", conn_ptr); + if (conn_ptr) { + get_conn_info(conn_ptr, &trace->conn); + } + + __builtin_memcpy(trace->topic, topic_ptr->name, MAX_TOPIC_NAME_LEN); + __builtin_memcpy(&trace->tp, &(topic_ptr->tp), sizeof(tp_info_t)); + 
task_pid(&trace->pid); + bpf_ringbuf_submit(trace, get_flags()); + } + } + bpf_map_delete_elem(&ongoing_produce_messages, &msg_ptr); + } + + bpf_map_delete_elem(&produce_requests, &goroutine_addr); + + return 0; +} + + +// Code for the fetch messages path +SEC("uprobe/reader_read") +int uprobe_reader_read(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + void *r_ptr = (void *)GO_PARAM1(ctx); + void *conn = (void *)GO_PARAM5(ctx); + bpf_dbg_printk("=== uprobe/kafka-go reader_read %llx r_ptr %llx=== ", goroutine_addr, r_ptr); + + if (r_ptr) { + kafka_go_req_t r = { + .type = EVENT_GO_KAFKA_SEG, + .op = KAFKA_API_FETCH, + .start_monotime_ns = 0, + }; + + void *topic_ptr = 0; + bpf_probe_read_user(&topic_ptr, sizeof(void *), r_ptr + kafka_go_reader_topic_pos); + + bpf_dbg_printk("topic_ptr %llx", topic_ptr); + if (topic_ptr) { + bpf_probe_read_user(&r.topic, sizeof(r.topic), topic_ptr); + } + + if (conn) { + void *conn_ptr = 0; + bpf_probe_read(&conn_ptr, sizeof(conn_ptr), (void *)(conn + 8)); // find conn + bpf_dbg_printk("conn ptr %llx", conn_ptr); + if (conn_ptr) { + get_conn_info(conn_ptr, &r.conn); + } + } + + bpf_map_update_elem(&fetch_requests, &goroutine_addr, &r, BPF_ANY); + } + + return 0; +} + +SEC("uprobe/reader_send_message") +int uprobe_reader_send_message(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + bpf_dbg_printk("=== uprobe/kafka-go reader_send_message %llx === ", goroutine_addr); + + kafka_go_req_t *req = (kafka_go_req_t *)bpf_map_lookup_elem(&fetch_requests, &goroutine_addr); + bpf_dbg_printk("Found req_ptr %llx", req); + + if (req) { + req->start_monotime_ns = bpf_ktime_get_ns(); + } + + return 0; +} + +SEC("uprobe/reader_read") +int uprobe_reader_read_ret(struct pt_regs *ctx) { + void *goroutine_addr = (void *)GOROUTINE_PTR(ctx); + bpf_dbg_printk("=== uprobe/kafka-go reader_read ret %llx === ", goroutine_addr); + + kafka_go_req_t *req = (kafka_go_req_t *)bpf_map_lookup_elem(&fetch_requests, 
&goroutine_addr); + bpf_dbg_printk("Found req_ptr %llx", req); + + if (req) { + if (req->start_monotime_ns) { + kafka_go_req_t *trace = bpf_ringbuf_reserve(&events, sizeof(kafka_go_req_t), 0); + if (trace) { + __builtin_memcpy(trace, req, sizeof(kafka_go_req_t)); + trace->end_monotime_ns = bpf_ktime_get_ns(); + task_pid(&trace->pid); + bpf_ringbuf_submit(trace, get_flags()); + } + } else { + bpf_dbg_printk("Found request with no start time, ignoring..."); + } + } + + bpf_map_delete_elem(&fetch_requests, &goroutine_addr); + + return 0; +} \ No newline at end of file diff --git a/bpf/go_kafka.c b/bpf/go_sarama.c similarity index 100% rename from bpf/go_kafka.c rename to bpf/go_sarama.c diff --git a/bpf/http_trace.c b/bpf/http_trace.c index d7e01782b..ce0a8de04 100644 --- a/bpf/http_trace.c +++ b/bpf/http_trace.c @@ -18,3 +18,4 @@ const sql_request_trace *unused_3 __attribute__((unused)); const tcp_req_t *unused_5 __attribute__((unused)); const kafka_client_req_t *unused_6 __attribute__((unused)); const redis_client_req_t *unused_7 __attribute__((unused)); +const kafka_go_req_t *unused_8 __attribute__((unused)); diff --git a/bpf/http_trace.h b/bpf/http_trace.h index c7ec949ee..8efe99318 100644 --- a/bpf/http_trace.h +++ b/bpf/http_trace.h @@ -25,6 +25,7 @@ #define SQL_MAX_LEN 500 #define KAFKA_MAX_LEN 256 #define REDIS_MAX_LEN 256 +#define MAX_TOPIC_NAME_LEN 64 // Trace of an HTTP call invocation. It is instantiated by the return uprobe and forwarded to the // user space through the events ringbuffer. 
@@ -63,6 +64,17 @@ typedef struct kafka_client_req { pid_info pid; } __attribute__((packed)) kafka_client_req_t; +typedef struct kafka_go_req { + u8 type; // Must be first + u64 start_monotime_ns; + u64 end_monotime_ns; + u8 topic[MAX_TOPIC_NAME_LEN]; + connection_info_t conn __attribute__ ((aligned (8))); + tp_info_t tp; + pid_info pid; + u8 op; +} __attribute__((packed)) kafka_go_req_t; + typedef struct redis_client_req { u8 type; // Must be first u64 start_monotime_ns; diff --git a/bpf/ringbuf.h b/bpf/ringbuf.h index 47352a669..3ce26fa43 100644 --- a/bpf/ringbuf.h +++ b/bpf/ringbuf.h @@ -15,6 +15,7 @@ #define EVENT_TCP_REQUEST 8 #define EVENT_GO_KAFKA 9 #define EVENT_GO_REDIS 10 +#define EVENT_GO_KAFKA_SEG 11 // the segment-io version (kafka-go) has different format // setting here the following map definitions without pinning them to a global namespace // would lead that services running both HTTP and GRPC server would duplicate diff --git a/configs/offsets/kafkago/go.mod b/configs/offsets/kafkago/go.mod new file mode 100644 index 000000000..d1b322e74 --- /dev/null +++ b/configs/offsets/kafkago/go.mod @@ -0,0 +1,10 @@ +module kafkago_off + +go 1.22.2 + +require github.com/segmentio/kafka-go v0.4.47 + +require ( + github.com/klauspost/compress v1.15.9 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect +) diff --git a/configs/offsets/kafkago/go.sum b/configs/offsets/kafkago/go.sum new file mode 100644 index 000000000..3c1b808d0 --- /dev/null +++ b/configs/offsets/kafkago/go.sum @@ -0,0 +1,68 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= 
+github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= +github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/configs/offsets/kafkago/inspect.go b/configs/offsets/kafkago/inspect.go new file mode 100644 index 000000000..93c833c95 --- /dev/null +++ b/configs/offsets/kafkago/inspect.go @@ -0,0 
+1,70 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "strings" + + kafka "github.com/segmentio/kafka-go" +) + +func producerHandler(kafkaWriter *kafka.Writer) { + msg := kafka.Message{ + Key: []byte("address-hello"), + Value: []byte("world"), + } + err := kafkaWriter.WriteMessages(context.Background(), msg) + + if err != nil { + fmt.Printf("error %v\n", err) + } +} + +func getKafkaWriter(kafkaURL, topic string) *kafka.Writer { + return &kafka.Writer{ + Addr: kafka.TCP(kafkaURL), + Topic: topic, + Balancer: &kafka.LeastBytes{}, + } +} + +func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader { + brokers := strings.Split(kafkaURL, ",") + return kafka.NewReader(kafka.ReaderConfig{ + Brokers: brokers, + GroupID: groupID, + Topic: topic, + MinBytes: 10e3, // 10KB + MaxBytes: 10e6, // 10MB + }) +} + +func main() { + // get kafka writer using environment variables. + kafkaURL := os.Getenv("kafkaURL") + topic := os.Getenv("topic") + + kafkaWriter := getKafkaWriter(kafkaURL, topic) + defer kafkaWriter.Close() + + groupID := os.Getenv("groupID") + + reader := getKafkaReader(kafkaURL, topic, groupID) + defer reader.Close() + + go func() { + fmt.Println("start consuming ... 
!!") + for { + m, err := reader.ReadMessage(context.Background()) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("message at topic:%v partition:%v offset:%v %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) + } + }() + + // Call the producer to keep the symbols + producerHandler(kafkaWriter) +} diff --git a/configs/offsets/tracker_input.json b/configs/offsets/tracker_input.json index a0a4dfecb..793056c96 100644 --- a/configs/offsets/tracker_input.json +++ b/configs/offsets/tracker_input.json @@ -150,5 +150,20 @@ "bw" ] } - } + }, + "github.com/segmentio/kafka-go": { + "inspect": "./configs/offsets/kafkago/inspect.go", + "versions": ">= v0.4.11", + "fields": { + "github.com/segmentio/kafka-go.Writer": [ + "Topic" + ], + "github.com/segmentio/kafka-go/protocol.Conn": [ + "conn" + ], + "github.com/segmentio/kafka-go.reader": [ + "topic" + ] + } + } } diff --git a/pkg/internal/discover/finder.go b/pkg/internal/discover/finder.go index 56fdd225f..2834f677e 100644 --- a/pkg/internal/discover/finder.go +++ b/pkg/internal/discover/finder.go @@ -8,14 +8,15 @@ import ( "github.com/grafana/beyla/pkg/beyla" "github.com/grafana/beyla/pkg/internal/ebpf" - "github.com/grafana/beyla/pkg/internal/ebpf/gokafka" "github.com/grafana/beyla/pkg/internal/ebpf/goredis" "github.com/grafana/beyla/pkg/internal/ebpf/goruntime" "github.com/grafana/beyla/pkg/internal/ebpf/grpc" "github.com/grafana/beyla/pkg/internal/ebpf/httpfltr" "github.com/grafana/beyla/pkg/internal/ebpf/httpssl" + "github.com/grafana/beyla/pkg/internal/ebpf/kafkago" "github.com/grafana/beyla/pkg/internal/ebpf/nethttp" "github.com/grafana/beyla/pkg/internal/ebpf/nodejs" + "github.com/grafana/beyla/pkg/internal/ebpf/sarama" "github.com/grafana/beyla/pkg/internal/imetrics" "github.com/grafana/beyla/pkg/internal/pipe/global" ) @@ -101,9 +102,10 @@ func newGoTracersGroup(cfg *beyla.Config, metrics imetrics.Reporter) []ebpf.Trac nethttp.New(cfg, metrics), grpc.New(cfg, metrics), goruntime.New(cfg, 
metrics), - gokafka.New(cfg, metrics), - &gokafka.ShopifyKafkaTracer{Tracer: *gokafka.New(cfg, metrics)}, + sarama.New(cfg, metrics), + &sarama.ShopifyKafkaTracer{Tracer: *sarama.New(cfg, metrics)}, goredis.New(cfg, metrics), + kafkago.New(cfg, metrics), } } diff --git a/pkg/internal/ebpf/common/bpf_bpfel_arm64.go b/pkg/internal/ebpf/common/bpf_bpfel_arm64.go index 82dba3318..85de02229 100644 --- a/pkg/internal/ebpf/common/bpf_bpfel_arm64.go +++ b/pkg/internal/ebpf/common/bpf_bpfel_arm64.go @@ -127,6 +127,30 @@ type bpfKafkaClientReqT struct { } } +type bpfKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpfConnectionInfoT + Tp struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte + } + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + type bpfRedisClientReqT struct { Type uint8 StartMonotimeNs uint64 diff --git a/pkg/internal/ebpf/common/bpf_bpfel_arm64.o b/pkg/internal/ebpf/common/bpf_bpfel_arm64.o index 6bf0843ba..5d3393e13 100644 Binary files a/pkg/internal/ebpf/common/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/common/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/common/bpf_bpfel_x86.go b/pkg/internal/ebpf/common/bpf_bpfel_x86.go index dcac35f95..79fb03529 100644 --- a/pkg/internal/ebpf/common/bpf_bpfel_x86.go +++ b/pkg/internal/ebpf/common/bpf_bpfel_x86.go @@ -127,6 +127,30 @@ type bpfKafkaClientReqT struct { } } +type bpfKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpfConnectionInfoT + Tp struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte + } + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + type bpfRedisClientReqT struct { Type uint8 StartMonotimeNs uint64 diff --git a/pkg/internal/ebpf/common/bpf_bpfel_x86.o 
b/pkg/internal/ebpf/common/bpf_bpfel_x86.o index 6bf0843ba..5d3393e13 100644 Binary files a/pkg/internal/ebpf/common/bpf_bpfel_x86.o and b/pkg/internal/ebpf/common/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/common/common.go b/pkg/internal/ebpf/common/common.go index 8ac567010..3bedc6abd 100644 --- a/pkg/internal/ebpf/common/common.go +++ b/pkg/internal/ebpf/common/common.go @@ -17,7 +17,7 @@ import ( "github.com/grafana/beyla/pkg/internal/request" ) -//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 -type http_request_trace -type sql_request_trace -type http_info_t -type connection_info_t -type http2_grpc_request_t -type tcp_req_t -type kafka_client_req_t -type redis_client_req_t bpf ../../../../bpf/http_trace.c -- -I../../../../bpf/headers +//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 -type http_request_trace -type sql_request_trace -type http_info_t -type connection_info_t -type http2_grpc_request_t -type tcp_req_t -type kafka_client_req_t -type kafka_go_req_t -type redis_client_req_t bpf ../../../../bpf/http_trace.c -- -I../../../../bpf/headers // HTTPRequestTrace contains information from an HTTP request as directly received from the // eBPF layer. This contains low-level C structures for accurate binary read from ring buffer. 
@@ -26,15 +26,17 @@ type SQLRequestTrace bpfSqlRequestTrace type BPFHTTPInfo bpfHttpInfoT type BPFConnInfo bpfConnectionInfoT type TCPRequestInfo bpfTcpReqT -type GoKafkaClientInfo bpfKafkaClientReqT +type GoSaramaClientInfo bpfKafkaClientReqT type GoRedisClientInfo bpfRedisClientReqT +type GoKafkaGoClientInfo bpfKafkaGoReqT -const EventTypeSQL = 5 // EVENT_SQL_CLIENT -const EventTypeKHTTP = 6 // HTTP Events generated by kprobes -const EventTypeKHTTP2 = 7 // HTTP2/gRPC Events generated by kprobes -const EventTypeTCP = 8 // Unknown TCP protocol to be classified by user space -const EventTypeGoKafka = 9 // Kafka client for Go -const EventTypeGoRedis = 10 // Redis client for Go +const EventTypeSQL = 5 // EVENT_SQL_CLIENT +const EventTypeKHTTP = 6 // HTTP Events generated by kprobes +const EventTypeKHTTP2 = 7 // HTTP2/gRPC Events generated by kprobes +const EventTypeTCP = 8 // Unknown TCP protocol to be classified by user space +const EventTypeGoSarama = 9 // Kafka client for Go (Shopify/IBM Sarama) +const EventTypeGoRedis = 10 // Redis client for Go +const EventTypeGoKafkaGo = 11 // Kafka-Go client from Segment-io var IntegrityModeOverride = false @@ -119,10 +121,12 @@ func ReadBPFTraceAsSpan(record *ringbuf.Record, filter ServiceFilter) (request.S return ReadHTTP2InfoIntoSpan(record, filter) case EventTypeTCP: return ReadTCPRequestIntoSpan(record, filter) - case EventTypeGoKafka: - return ReadGoKafkaRequestIntoSpan(record) + case EventTypeGoSarama: + return ReadGoSaramaRequestIntoSpan(record) case EventTypeGoRedis: return ReadGoRedisRequestIntoSpan(record) + case EventTypeGoKafkaGo: + return ReadGoKafkaGoRequestIntoSpan(record) } var event HTTPRequestTrace diff --git a/pkg/internal/ebpf/common/go_kafka_transform.go b/pkg/internal/ebpf/common/go_kafka_transform.go index b6821c24b..3603a12c2 100644 --- a/pkg/internal/ebpf/common/go_kafka_transform.go +++ b/pkg/internal/ebpf/common/go_kafka_transform.go @@ -3,31 +3,35 @@ package ebpfcommon import ( "bytes" 
"encoding/binary" + "unsafe" + "github.com/cilium/ebpf/ringbuf" + "go.opentelemetry.io/otel/trace" + "github.com/grafana/beyla/pkg/internal/request" ) -func ReadGoKafkaRequestIntoSpan(record *ringbuf.Record) (request.Span, bool, error) { - var event GoKafkaClientInfo +func ReadGoSaramaRequestIntoSpan(record *ringbuf.Record) (request.Span, bool, error) { + var event GoSaramaClientInfo err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event) if err != nil { return request.Span{}, true, err } + + + info, err := ProcessKafkaRequest(event.Buf[:]) if err == nil { - return GoKafkaToSpan(&event, info), false, nil + return GoKafkaSaramaToSpan(&event, info), false, nil } return request.Span{}, true, nil // ignore if we couldn't parse it } -func GoKafkaToSpan(event *GoKafkaClientInfo, data *KafkaInfo) request.Span { +func GoKafkaSaramaToSpan(event *GoSaramaClientInfo, data *KafkaInfo) request.Span { peer := "" hostname := "" hostPort := 0 @@ -58,3 +62,50 @@ func GoKafkaToSpan(event *GoKafkaClientInfo, data *KafkaInfo) request.Span { }, } } + +func ReadGoKafkaGoRequestIntoSpan(record *ringbuf.Record) (request.Span, bool, error) { + var event GoKafkaGoClientInfo + + err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event) + if err != nil { + return request.Span{}, true, err + } + + peer := "" + hostname := "" + hostPort := 0 + + if event.Conn.S_port != 0 || event.Conn.D_port != 0 { + peer, hostname = (*BPFConnInfo)(unsafe.Pointer(&event.Conn)).reqHostInfo() + hostPort = int(event.Conn.D_port) + } + + op := Produce + if event.Op == 1 { + op = Fetch + } + + return request.Span{ + Type: request.EventTypeKafkaClient, + Method: op.String(), + OtherNamespace: "github.com/segmentio/kafka-go", + Path: cstr(event.Topic[:]), + Peer: peer, + PeerPort: int(event.Conn.S_port), + Host: hostname, + HostPort: hostPort, + ContentLength: 0, + RequestStart: int64(event.StartMonotimeNs), + Start: 
int64(event.StartMonotimeNs), + End: int64(event.EndMonotimeNs), + TraceID: trace.TraceID(event.Tp.TraceId), + SpanID: trace.SpanID(event.Tp.SpanId), + ParentSpanID: trace.SpanID(event.Tp.ParentId), + Status: 0, + Pid: request.PidInfo{ + HostPID: event.Pid.HostPid, + UserPID: event.Pid.UserPid, + Namespace: event.Pid.Ns, + }, + }, false, nil +} diff --git a/pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.o b/pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.o deleted file mode 100644 index a54923446..000000000 Binary files a/pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.o and /dev/null differ diff --git a/pkg/internal/ebpf/gokafka/bpf_bpfel_x86.o b/pkg/internal/ebpf/gokafka/bpf_bpfel_x86.o deleted file mode 100644 index 63c09b187..000000000 Binary files a/pkg/internal/ebpf/gokafka/bpf_bpfel_x86.o and /dev/null differ diff --git a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.o deleted file mode 100644 index df4f160bb..000000000 Binary files a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.o and /dev/null differ diff --git a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.o deleted file mode 100644 index 12d3734af..000000000 Binary files a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.o and /dev/null differ diff --git a/pkg/internal/ebpf/goredis/bpf_bpfel_arm64.o b/pkg/internal/ebpf/goredis/bpf_bpfel_arm64.o index c27d36143..e4b03f7d8 100644 Binary files a/pkg/internal/ebpf/goredis/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/goredis/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/goredis/bpf_bpfel_x86.o b/pkg/internal/ebpf/goredis/bpf_bpfel_x86.o index fe4ba8636..0412e3148 100644 Binary files a/pkg/internal/ebpf/goredis/bpf_bpfel_x86.o and b/pkg/internal/ebpf/goredis/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/goredis/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/goredis/bpf_debug_bpfel_arm64.o index 42b4953d4..f19c8e1b1 100644 Binary files 
a/pkg/internal/ebpf/goredis/bpf_debug_bpfel_arm64.o and b/pkg/internal/ebpf/goredis/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/goredis/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/goredis/bpf_debug_bpfel_x86.o index b55ad6263..7e7b3f29d 100644 Binary files a/pkg/internal/ebpf/goredis/bpf_debug_bpfel_x86.o and b/pkg/internal/ebpf/goredis/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_bpfel_arm64.o b/pkg/internal/ebpf/grpc/bpf_bpfel_arm64.o index b0009d4b8..b0088b9e0 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/grpc/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_bpfel_x86.o b/pkg/internal/ebpf/grpc/bpf_bpfel_x86.o index c6ad9f48e..0bce887cf 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_bpfel_x86.o and b/pkg/internal/ebpf/grpc/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/grpc/bpf_debug_bpfel_arm64.o index 610d044cd..afa82931c 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_debug_bpfel_arm64.o and b/pkg/internal/ebpf/grpc/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/grpc/bpf_debug_bpfel_x86.o index 205a0d747..3d51486aa 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_debug_bpfel_x86.o and b/pkg/internal/ebpf/grpc/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_tp_bpfel_arm64.o b/pkg/internal/ebpf/grpc/bpf_tp_bpfel_arm64.o index 5bacf6969..7e8f1f714 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_tp_bpfel_arm64.o and b/pkg/internal/ebpf/grpc/bpf_tp_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_tp_bpfel_x86.o b/pkg/internal/ebpf/grpc/bpf_tp_bpfel_x86.o index 97ce2ddb3..89d1222f2 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_tp_bpfel_x86.o and b/pkg/internal/ebpf/grpc/bpf_tp_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_arm64.o b/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_arm64.o index 
a3eec3e17..3ceeafbb7 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_arm64.o and b/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_x86.o b/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_x86.o index 8b7f119ff..510180665 100644 Binary files a/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_x86.o and b/pkg/internal/ebpf/grpc/bpf_tp_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_bpfel_arm64.o b/pkg/internal/ebpf/httpfltr/bpf_bpfel_arm64.o index b5912037e..c669318d1 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/httpfltr/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_bpfel_x86.o b/pkg/internal/ebpf/httpfltr/bpf_bpfel_x86.o index d3d8303f9..08ea0d0d5 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_bpfel_x86.o and b/pkg/internal/ebpf/httpfltr/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_arm64.o index cf3558552..bc6a0b1e8 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_arm64.o and b/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_x86.o index f8c767c35..19e59200f 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_x86.o and b/pkg/internal/ebpf/httpfltr/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_arm64.o b/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_arm64.o index 348d0056b..3675f6ebb 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_arm64.o and b/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_x86.o b/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_x86.o index 9891a40f1..daf72e974 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_x86.o and 
b/pkg/internal/ebpf/httpfltr/bpf_tp_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_arm64.o b/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_arm64.o index bf2bb8726..c5c190778 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_arm64.o and b/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_x86.o b/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_x86.o index b84a19d0d..69ba3c43a 100644 Binary files a/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_x86.o and b/pkg/internal/ebpf/httpfltr/bpf_tp_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_bpfel_arm64.o b/pkg/internal/ebpf/httpssl/bpf_bpfel_arm64.o index b37d70c8e..eb4d22735 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/httpssl/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_bpfel_x86.o b/pkg/internal/ebpf/httpssl/bpf_bpfel_x86.o index fc72d93b4..f604516d7 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_bpfel_x86.o and b/pkg/internal/ebpf/httpssl/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_arm64.o index 8258e1e50..fdaf56fc3 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_arm64.o and b/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_x86.o index 290405f8c..98e4bd260 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_x86.o and b/pkg/internal/ebpf/httpssl/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_arm64.o b/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_arm64.o index 26dccaa6b..a4aeb25e5 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_arm64.o and b/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_x86.o 
b/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_x86.o index 7f6e2b50c..bc9a4f902 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_x86.o and b/pkg/internal/ebpf/httpssl/bpf_tp_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_arm64.o b/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_arm64.o index a895bb8c8..26e9d4bd9 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_arm64.o and b/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_x86.o b/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_x86.o index 886858417..62a74df48 100644 Binary files a/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_x86.o and b/pkg/internal/ebpf/httpssl/bpf_tp_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.go b/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.go new file mode 100644 index 000000000..de90b2856 --- /dev/null +++ b/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.go @@ -0,0 +1,229 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build arm64 + +package kafkago + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpfConnectionInfoT struct { + S_addr [16]uint8 + D_addr [16]uint8 + S_port uint16 + D_port uint16 +} + +type bpfGoroutineMetadata struct { + Parent uint64 + Timestamp uint64 +} + +type bpfKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpfConnectionInfoT + Tp bpfTpInfoT + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + +type bpfProduceReqT struct { + MsgPtr uint64 + ConnPtr uint64 + StartMonotimeNs uint64 +} + +type bpfTopicT struct { + Name [64]int8 + Tp bpfTpInfoT +} + +type bpfTpInfoPidT struct { + Tp bpfTpInfoT + Pid uint32 + Valid uint8 + _ [3]byte +} + +type bpfTpInfoT struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte +} + +// loadBpf returns the embedded CollectionSpec for bpf. +func loadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf: %w", err) + } + + return spec, err +} + +// loadBpfObjects loads bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpfObjects +// *bpfPrograms +// *bpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfSpecs struct { + bpfProgramSpecs + bpfMapSpecs +} + +// bpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type bpfProgramSpecs struct { + UprobeClientRoundTrip *ebpf.ProgramSpec `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.ProgramSpec `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.ProgramSpec `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.ProgramSpec `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.ProgramSpec `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_writer_write_messages"` +} + +// bpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfMapSpecs struct { + Events *ebpf.MapSpec `ebpf:"events"` + FetchRequests *ebpf.MapSpec `ebpf:"fetch_requests"` + GoTraceMap *ebpf.MapSpec `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.MapSpec `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.MapSpec `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.MapSpec `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.MapSpec `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.MapSpec `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.MapSpec `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.MapSpec `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.MapSpec `ebpf:"produce_traceparents"` + TraceMap *ebpf.MapSpec `ebpf:"trace_map"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. 
+// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfMaps struct { + Events *ebpf.Map `ebpf:"events"` + FetchRequests *ebpf.Map `ebpf:"fetch_requests"` + GoTraceMap *ebpf.Map `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.Map `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.Map `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.Map `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.Map `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.Map `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.Map `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.Map `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.Map `ebpf:"produce_traceparents"` + TraceMap *ebpf.Map `ebpf:"trace_map"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.Events, + m.FetchRequests, + m.GoTraceMap, + m.GolangMapbucketStorageMap, + m.OngoingClientConnections, + m.OngoingGoroutines, + m.OngoingProduceMessages, + m.OngoingProduceTopics, + m.OngoingServerConnections, + m.ProduceRequests, + m.ProduceTraceparents, + m.TraceMap, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpfPrograms struct { + UprobeClientRoundTrip *ebpf.Program `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.Program `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.Program `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.Program `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.Program `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.Program `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.Program `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.Program `ebpf:"uprobe_writer_write_messages"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeClientRoundTrip, + p.UprobeProtocolRoundtrip, + p.UprobeProtocolRoundtripRet, + p.UprobeReaderRead, + p.UprobeReaderReadRet, + p.UprobeReaderSendMessage, + p.UprobeWriterProduce, + p.UprobeWriterWriteMessages, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_bpfel_arm64.o +var _BpfBytes []byte diff --git a/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.o b/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.o new file mode 100644 index 000000000..2a3257e0f Binary files /dev/null and b/pkg/internal/ebpf/kafkago/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.go b/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.go new file mode 100644 index 000000000..1db84fba2 --- /dev/null +++ b/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.go @@ -0,0 +1,229 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package kafkago + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpfConnectionInfoT struct { + S_addr [16]uint8 + D_addr [16]uint8 + S_port uint16 + D_port uint16 +} + +type bpfGoroutineMetadata struct { + Parent uint64 + Timestamp uint64 +} + +type bpfKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpfConnectionInfoT + Tp bpfTpInfoT + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + +type bpfProduceReqT struct { + MsgPtr uint64 + ConnPtr uint64 + StartMonotimeNs uint64 +} + +type bpfTopicT struct { + Name [64]int8 + Tp bpfTpInfoT +} + +type bpfTpInfoPidT struct { + Tp bpfTpInfoT + Pid uint32 + Valid uint8 + _ [3]byte +} + +type bpfTpInfoT struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte +} + +// loadBpf returns the embedded CollectionSpec for bpf. +func loadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf: %w", err) + } + + return spec, err +} + +// loadBpfObjects loads bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpfObjects +// *bpfPrograms +// *bpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfSpecs struct { + bpfProgramSpecs + bpfMapSpecs +} + +// bpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type bpfProgramSpecs struct { + UprobeClientRoundTrip *ebpf.ProgramSpec `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.ProgramSpec `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.ProgramSpec `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.ProgramSpec `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.ProgramSpec `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_writer_write_messages"` +} + +// bpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfMapSpecs struct { + Events *ebpf.MapSpec `ebpf:"events"` + FetchRequests *ebpf.MapSpec `ebpf:"fetch_requests"` + GoTraceMap *ebpf.MapSpec `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.MapSpec `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.MapSpec `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.MapSpec `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.MapSpec `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.MapSpec `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.MapSpec `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.MapSpec `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.MapSpec `ebpf:"produce_traceparents"` + TraceMap *ebpf.MapSpec `ebpf:"trace_map"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. 
+// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfMaps struct { + Events *ebpf.Map `ebpf:"events"` + FetchRequests *ebpf.Map `ebpf:"fetch_requests"` + GoTraceMap *ebpf.Map `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.Map `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.Map `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.Map `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.Map `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.Map `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.Map `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.Map `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.Map `ebpf:"produce_traceparents"` + TraceMap *ebpf.Map `ebpf:"trace_map"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.Events, + m.FetchRequests, + m.GoTraceMap, + m.GolangMapbucketStorageMap, + m.OngoingClientConnections, + m.OngoingGoroutines, + m.OngoingProduceMessages, + m.OngoingProduceTopics, + m.OngoingServerConnections, + m.ProduceRequests, + m.ProduceTraceparents, + m.TraceMap, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpfPrograms struct { + UprobeClientRoundTrip *ebpf.Program `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.Program `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.Program `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.Program `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.Program `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.Program `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.Program `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.Program `ebpf:"uprobe_writer_write_messages"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeClientRoundTrip, + p.UprobeProtocolRoundtrip, + p.UprobeProtocolRoundtripRet, + p.UprobeReaderRead, + p.UprobeReaderReadRet, + p.UprobeReaderSendMessage, + p.UprobeWriterProduce, + p.UprobeWriterWriteMessages, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_bpfel_x86.o +var _BpfBytes []byte diff --git a/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.o b/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.o new file mode 100644 index 000000000..013a332cc Binary files /dev/null and b/pkg/internal/ebpf/kafkago/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.go b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.go new file mode 100644 index 000000000..9f7ec6043 --- /dev/null +++ b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.go @@ -0,0 +1,232 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build arm64 + +package kafkago + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpf_debugConnectionInfoT struct { + S_addr [16]uint8 + D_addr [16]uint8 + S_port uint16 + D_port uint16 +} + +type bpf_debugGoroutineMetadata struct { + Parent uint64 + Timestamp uint64 +} + +type bpf_debugKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpf_debugConnectionInfoT + Tp bpf_debugTpInfoT + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + +type bpf_debugProduceReqT struct { + MsgPtr uint64 + ConnPtr uint64 + StartMonotimeNs uint64 +} + +type bpf_debugTopicT struct { + Name [64]int8 + Tp bpf_debugTpInfoT +} + +type bpf_debugTpInfoPidT struct { + Tp bpf_debugTpInfoT + Pid uint32 + Valid uint8 + _ [3]byte +} + +type bpf_debugTpInfoT struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte +} + +// loadBpf_debug returns the embedded CollectionSpec for bpf_debug. +func loadBpf_debug() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_Bpf_debugBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf_debug: %w", err) + } + + return spec, err +} + +// loadBpf_debugObjects loads bpf_debug and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpf_debugObjects +// *bpf_debugPrograms +// *bpf_debugMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpf_debugObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf_debug() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpf_debugSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type bpf_debugSpecs struct { + bpf_debugProgramSpecs + bpf_debugMapSpecs +} + +// bpf_debugSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpf_debugProgramSpecs struct { + UprobeClientRoundTrip *ebpf.ProgramSpec `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.ProgramSpec `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.ProgramSpec `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.ProgramSpec `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.ProgramSpec `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_writer_write_messages"` +} + +// bpf_debugMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpf_debugMapSpecs struct { + DebugEvents *ebpf.MapSpec `ebpf:"debug_events"` + Events *ebpf.MapSpec `ebpf:"events"` + FetchRequests *ebpf.MapSpec `ebpf:"fetch_requests"` + GoTraceMap *ebpf.MapSpec `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.MapSpec `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.MapSpec `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.MapSpec `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.MapSpec `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.MapSpec `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.MapSpec `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.MapSpec `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.MapSpec `ebpf:"produce_traceparents"` + TraceMap *ebpf.MapSpec `ebpf:"trace_map"` +} + +// bpf_debugObjects contains all objects after they have been loaded into the kernel. 
+// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpf_debugObjects struct { + bpf_debugPrograms + bpf_debugMaps +} + +func (o *bpf_debugObjects) Close() error { + return _Bpf_debugClose( + &o.bpf_debugPrograms, + &o.bpf_debugMaps, + ) +} + +// bpf_debugMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpf_debugMaps struct { + DebugEvents *ebpf.Map `ebpf:"debug_events"` + Events *ebpf.Map `ebpf:"events"` + FetchRequests *ebpf.Map `ebpf:"fetch_requests"` + GoTraceMap *ebpf.Map `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.Map `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.Map `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.Map `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.Map `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.Map `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.Map `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.Map `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.Map `ebpf:"produce_traceparents"` + TraceMap *ebpf.Map `ebpf:"trace_map"` +} + +func (m *bpf_debugMaps) Close() error { + return _Bpf_debugClose( + m.DebugEvents, + m.Events, + m.FetchRequests, + m.GoTraceMap, + m.GolangMapbucketStorageMap, + m.OngoingClientConnections, + m.OngoingGoroutines, + m.OngoingProduceMessages, + m.OngoingProduceTopics, + m.OngoingServerConnections, + m.ProduceRequests, + m.ProduceTraceparents, + m.TraceMap, + ) +} + +// bpf_debugPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpf_debugPrograms struct { + UprobeClientRoundTrip *ebpf.Program `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.Program `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.Program `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.Program `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.Program `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.Program `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.Program `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.Program `ebpf:"uprobe_writer_write_messages"` +} + +func (p *bpf_debugPrograms) Close() error { + return _Bpf_debugClose( + p.UprobeClientRoundTrip, + p.UprobeProtocolRoundtrip, + p.UprobeProtocolRoundtripRet, + p.UprobeReaderRead, + p.UprobeReaderReadRet, + p.UprobeReaderSendMessage, + p.UprobeWriterProduce, + p.UprobeWriterWriteMessages, + ) +} + +func _Bpf_debugClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_debug_bpfel_arm64.o +var _Bpf_debugBytes []byte diff --git a/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.o new file mode 100644 index 000000000..4bbf4f4e8 Binary files /dev/null and b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.go b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.go new file mode 100644 index 000000000..f4a03ab3a --- /dev/null +++ b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.go @@ -0,0 +1,232 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package kafkago + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpf_debugConnectionInfoT struct { + S_addr [16]uint8 + D_addr [16]uint8 + S_port uint16 + D_port uint16 +} + +type bpf_debugGoroutineMetadata struct { + Parent uint64 + Timestamp uint64 +} + +type bpf_debugKafkaGoReqT struct { + Type uint8 + StartMonotimeNs uint64 + EndMonotimeNs uint64 + Topic [64]uint8 + _ [7]byte + Conn bpf_debugConnectionInfoT + Tp bpf_debugTpInfoT + Pid struct { + HostPid uint32 + UserPid uint32 + Ns uint32 + } + Op uint8 + _ [7]byte +} + +type bpf_debugProduceReqT struct { + MsgPtr uint64 + ConnPtr uint64 + StartMonotimeNs uint64 +} + +type bpf_debugTopicT struct { + Name [64]int8 + Tp bpf_debugTpInfoT +} + +type bpf_debugTpInfoPidT struct { + Tp bpf_debugTpInfoT + Pid uint32 + Valid uint8 + _ [3]byte +} + +type bpf_debugTpInfoT struct { + TraceId [16]uint8 + SpanId [8]uint8 + ParentId [8]uint8 + Ts uint64 + Flags uint8 + _ [7]byte +} + +// loadBpf_debug returns the embedded CollectionSpec for bpf_debug. +func loadBpf_debug() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_Bpf_debugBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf_debug: %w", err) + } + + return spec, err +} + +// loadBpf_debugObjects loads bpf_debug and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpf_debugObjects +// *bpf_debugPrograms +// *bpf_debugMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpf_debugObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf_debug() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpf_debugSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type bpf_debugSpecs struct { + bpf_debugProgramSpecs + bpf_debugMapSpecs +} + +// bpf_debugSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpf_debugProgramSpecs struct { + UprobeClientRoundTrip *ebpf.ProgramSpec `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.ProgramSpec `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.ProgramSpec `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.ProgramSpec `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.ProgramSpec `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.ProgramSpec `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_writer_write_messages"` +} + +// bpf_debugMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpf_debugMapSpecs struct { + DebugEvents *ebpf.MapSpec `ebpf:"debug_events"` + Events *ebpf.MapSpec `ebpf:"events"` + FetchRequests *ebpf.MapSpec `ebpf:"fetch_requests"` + GoTraceMap *ebpf.MapSpec `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.MapSpec `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.MapSpec `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.MapSpec `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.MapSpec `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.MapSpec `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.MapSpec `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.MapSpec `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.MapSpec `ebpf:"produce_traceparents"` + TraceMap *ebpf.MapSpec `ebpf:"trace_map"` +} + +// bpf_debugObjects contains all objects after they have been loaded into the kernel. 
+// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpf_debugObjects struct { + bpf_debugPrograms + bpf_debugMaps +} + +func (o *bpf_debugObjects) Close() error { + return _Bpf_debugClose( + &o.bpf_debugPrograms, + &o.bpf_debugMaps, + ) +} + +// bpf_debugMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpf_debugMaps struct { + DebugEvents *ebpf.Map `ebpf:"debug_events"` + Events *ebpf.Map `ebpf:"events"` + FetchRequests *ebpf.Map `ebpf:"fetch_requests"` + GoTraceMap *ebpf.Map `ebpf:"go_trace_map"` + GolangMapbucketStorageMap *ebpf.Map `ebpf:"golang_mapbucket_storage_map"` + OngoingClientConnections *ebpf.Map `ebpf:"ongoing_client_connections"` + OngoingGoroutines *ebpf.Map `ebpf:"ongoing_goroutines"` + OngoingProduceMessages *ebpf.Map `ebpf:"ongoing_produce_messages"` + OngoingProduceTopics *ebpf.Map `ebpf:"ongoing_produce_topics"` + OngoingServerConnections *ebpf.Map `ebpf:"ongoing_server_connections"` + ProduceRequests *ebpf.Map `ebpf:"produce_requests"` + ProduceTraceparents *ebpf.Map `ebpf:"produce_traceparents"` + TraceMap *ebpf.Map `ebpf:"trace_map"` +} + +func (m *bpf_debugMaps) Close() error { + return _Bpf_debugClose( + m.DebugEvents, + m.Events, + m.FetchRequests, + m.GoTraceMap, + m.GolangMapbucketStorageMap, + m.OngoingClientConnections, + m.OngoingGoroutines, + m.OngoingProduceMessages, + m.OngoingProduceTopics, + m.OngoingServerConnections, + m.ProduceRequests, + m.ProduceTraceparents, + m.TraceMap, + ) +} + +// bpf_debugPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpf_debugObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpf_debugPrograms struct { + UprobeClientRoundTrip *ebpf.Program `ebpf:"uprobe_client_roundTrip"` + UprobeProtocolRoundtrip *ebpf.Program `ebpf:"uprobe_protocol_roundtrip"` + UprobeProtocolRoundtripRet *ebpf.Program `ebpf:"uprobe_protocol_roundtrip_ret"` + UprobeReaderRead *ebpf.Program `ebpf:"uprobe_reader_read"` + UprobeReaderReadRet *ebpf.Program `ebpf:"uprobe_reader_read_ret"` + UprobeReaderSendMessage *ebpf.Program `ebpf:"uprobe_reader_send_message"` + UprobeWriterProduce *ebpf.Program `ebpf:"uprobe_writer_produce"` + UprobeWriterWriteMessages *ebpf.Program `ebpf:"uprobe_writer_write_messages"` +} + +func (p *bpf_debugPrograms) Close() error { + return _Bpf_debugClose( + p.UprobeClientRoundTrip, + p.UprobeProtocolRoundtrip, + p.UprobeProtocolRoundtripRet, + p.UprobeReaderRead, + p.UprobeReaderReadRet, + p.UprobeReaderSendMessage, + p.UprobeWriterProduce, + p.UprobeWriterWriteMessages, + ) +} + +func _Bpf_debugClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_debug_bpfel_x86.o +var _Bpf_debugBytes []byte diff --git a/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.o new file mode 100644 index 000000000..c4d709b79 Binary files /dev/null and b/pkg/internal/ebpf/kafkago/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/kafkago/kafkago.go b/pkg/internal/ebpf/kafkago/kafkago.go new file mode 100644 index 000000000..a484ffa99 --- /dev/null +++ b/pkg/internal/ebpf/kafkago/kafkago.go @@ -0,0 +1,162 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafkago + +import ( + "context" + "io" + "log/slog" + "unsafe" + + "github.com/cilium/ebpf" + + "github.com/grafana/beyla/pkg/beyla" + ebpfcommon "github.com/grafana/beyla/pkg/internal/ebpf/common" + "github.com/grafana/beyla/pkg/internal/exec" + "github.com/grafana/beyla/pkg/internal/goexec" + "github.com/grafana/beyla/pkg/internal/imetrics" + "github.com/grafana/beyla/pkg/internal/request" + "github.com/grafana/beyla/pkg/internal/svc" +) + +//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf ../../../../bpf/go_kafka_go.c -- -I../../../../bpf/headers +//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf_debug ../../../../bpf/go_kafka_go.c -- -I../../../../bpf/headers -DBPF_DEBUG + +type Tracer struct { + log *slog.Logger + pidsFilter ebpfcommon.ServiceFilter + cfg *ebpfcommon.TracerConfig + metrics imetrics.Reporter + bpfObjects bpfObjects + closers []io.Closer +} + +func New(cfg *beyla.Config, metrics imetrics.Reporter) *Tracer { + log := slog.With("component", "kafkago.Tracer") + return &Tracer{ + log: log, + cfg: &cfg.EBPF, + metrics: metrics, + pidsFilter: ebpfcommon.CommonPIDsFilter(cfg.Discovery.SystemWide), + } +} + +func (p *Tracer) AllowPID(pid, ns uint32, svc svc.ID) { + p.pidsFilter.AllowPID(pid, ns, svc, ebpfcommon.PIDTypeGo) +} + +func (p *Tracer) BlockPID(pid, ns uint32) { + p.pidsFilter.BlockPID(pid, ns) +} + +func (p *Tracer) Load() (*ebpf.CollectionSpec, error) { + loader := loadBpf + if p.cfg.BpfDebug { + loader = loadBpf_debug + } + return loader() +} + +func (p 
*Tracer) SetupTailCalls() {} + +func (p *Tracer) Constants(_ *exec.FileInfo, offsets *goexec.Offsets) map[string]any { + // Set the field offsets and the logLevel for grpc BPF program, + // as well as some other configuration constants + constants := map[string]any{ + "wakeup_data_bytes": uint32(p.cfg.WakeupLen) * uint32(unsafe.Sizeof(ebpfcommon.HTTPRequestTrace{})), + } + + for _, s := range []string{ + "kafka_go_writer_topic_pos", + "kafka_go_protocol_conn_pos", + "kafka_go_reader_topic_pos", + "conn_fd_pos", + "fd_laddr_pos", + "fd_raddr_pos", + "tcp_addr_port_ptr_pos", + "tcp_addr_ip_ptr_pos", + } { + constants[s] = offsets.Field[s] + } + return constants +} + +func (p *Tracer) BpfObjects() any { + return &p.bpfObjects +} + +func (p *Tracer) AddCloser(c ...io.Closer) { + p.closers = append(p.closers, c...) +} + +func (p *Tracer) GoProbes() map[string]ebpfcommon.FunctionPrograms { + return map[string]ebpfcommon.FunctionPrograms{ + "github.com/segmentio/kafka-go.(*Writer).WriteMessages": { // runs on the same gorountine as other requests, finds traceparent info + Start: p.bpfObjects.UprobeWriterWriteMessages, + Required: true, + }, + "github.com/segmentio/kafka-go.(*Writer).produce": { // stores the current topic + Start: p.bpfObjects.UprobeWriterProduce, + Required: true, + }, + "github.com/segmentio/kafka-go.(*Client).roundTrip": { // has the goroutine connection with (*Writer).produce and msg* connection with protocol.RoundTrip + Start: p.bpfObjects.UprobeClientRoundTrip, + Required: true, + }, + "github.com/segmentio/kafka-go/protocol.RoundTrip": { // used for collecting the connection information + Start: p.bpfObjects.UprobeProtocolRoundtrip, + End: p.bpfObjects.UprobeProtocolRoundtripRet, + Required: true, + }, + "github.com/segmentio/kafka-go.(*reader).read": { // used for capturing the info for the fetch operations + Start: p.bpfObjects.UprobeReaderRead, + End: p.bpfObjects.UprobeReaderReadRet, + Required: true, + }, + 
"github.com/segmentio/kafka-go.(*reader).sendMessage": { // to accurately measure the start time + Start: p.bpfObjects.UprobeReaderSendMessage, + Required: true, + }, + } +} + +func (p *Tracer) KProbes() map[string]ebpfcommon.FunctionPrograms { + return nil +} + +func (p *Tracer) UProbes() map[string]map[string]ebpfcommon.FunctionPrograms { + return nil +} + +func (p *Tracer) Tracepoints() map[string]ebpfcommon.FunctionPrograms { + return nil +} + +func (p *Tracer) SocketFilters() []*ebpf.Program { + return nil +} + +func (p *Tracer) RecordInstrumentedLib(_ uint64) {} + +func (p *Tracer) AlreadyInstrumentedLib(_ uint64) bool { + return false +} + +func (p *Tracer) Run(ctx context.Context, eventsChan chan<- []request.Span) { + ebpfcommon.SharedRingbuf( + p.cfg, + p.pidsFilter, + p.bpfObjects.Events, + p.metrics, + )(ctx, append(p.closers, &p.bpfObjects), eventsChan) +} diff --git a/pkg/internal/ebpf/nethttp/bpf_bpfel_arm64.o b/pkg/internal/ebpf/nethttp/bpf_bpfel_arm64.o index 79b39b786..fed524eee 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_bpfel_arm64.o and b/pkg/internal/ebpf/nethttp/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_bpfel_x86.o b/pkg/internal/ebpf/nethttp/bpf_bpfel_x86.o index 8b15b3333..6c38674ef 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_bpfel_x86.o and b/pkg/internal/ebpf/nethttp/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_arm64.o index 6229c0232..97e99caf9 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_arm64.o and b/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_x86.o index 6c0700fac..19a9fb674 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_x86.o and b/pkg/internal/ebpf/nethttp/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_arm64.o 
b/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_arm64.o index 2f2269c6b..87ff6e8bc 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_arm64.o and b/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_x86.o b/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_x86.o index 87183f872..22b31db40 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_x86.o and b/pkg/internal/ebpf/nethttp/bpf_tp_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_arm64.o b/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_arm64.o index cc1875627..31eb71c8c 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_arm64.o and b/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_x86.o b/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_x86.o index 4408e632d..f9af4655c 100644 Binary files a/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_x86.o and b/pkg/internal/ebpf/nethttp/bpf_tp_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.go b/pkg/internal/ebpf/sarama/bpf_bpfel_arm64.go similarity index 99% rename from pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.go rename to pkg/internal/ebpf/sarama/bpf_bpfel_arm64.go index fff66a7de..ea688cf14 100644 --- a/pkg/internal/ebpf/gokafka/bpf_bpfel_arm64.go +++ b/pkg/internal/ebpf/sarama/bpf_bpfel_arm64.go @@ -1,7 +1,7 @@ // Code generated by bpf2go; DO NOT EDIT. 
//go:build arm64 -package gokafka +package sarama import ( "bytes" diff --git a/pkg/internal/ebpf/sarama/bpf_bpfel_arm64.o b/pkg/internal/ebpf/sarama/bpf_bpfel_arm64.o new file mode 100644 index 000000000..de95d9dfc Binary files /dev/null and b/pkg/internal/ebpf/sarama/bpf_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/gokafka/bpf_bpfel_x86.go b/pkg/internal/ebpf/sarama/bpf_bpfel_x86.go similarity index 99% rename from pkg/internal/ebpf/gokafka/bpf_bpfel_x86.go rename to pkg/internal/ebpf/sarama/bpf_bpfel_x86.go index e678dabb9..b1ecebf95 100644 --- a/pkg/internal/ebpf/gokafka/bpf_bpfel_x86.go +++ b/pkg/internal/ebpf/sarama/bpf_bpfel_x86.go @@ -1,7 +1,7 @@ // Code generated by bpf2go; DO NOT EDIT. //go:build 386 || amd64 -package gokafka +package sarama import ( "bytes" diff --git a/pkg/internal/ebpf/sarama/bpf_bpfel_x86.o b/pkg/internal/ebpf/sarama/bpf_bpfel_x86.o new file mode 100644 index 000000000..4926978c8 Binary files /dev/null and b/pkg/internal/ebpf/sarama/bpf_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.go b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.go similarity index 99% rename from pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.go rename to pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.go index 3c30e3c7e..97acab76e 100644 --- a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_arm64.go +++ b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.go @@ -1,7 +1,7 @@ // Code generated by bpf2go; DO NOT EDIT. 
//go:build arm64 -package gokafka +package sarama import ( "bytes" diff --git a/pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.o b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.o new file mode 100644 index 000000000..66299b005 Binary files /dev/null and b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_arm64.o differ diff --git a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.go b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.go similarity index 99% rename from pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.go rename to pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.go index 66fa04cd5..79a2370bc 100644 --- a/pkg/internal/ebpf/gokafka/bpf_debug_bpfel_x86.go +++ b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.go @@ -1,7 +1,7 @@ // Code generated by bpf2go; DO NOT EDIT. //go:build 386 || amd64 -package gokafka +package sarama import ( "bytes" diff --git a/pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.o b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.o new file mode 100644 index 000000000..d13aa40c3 Binary files /dev/null and b/pkg/internal/ebpf/sarama/bpf_debug_bpfel_x86.o differ diff --git a/pkg/internal/ebpf/gokafka/gokafka.go b/pkg/internal/ebpf/sarama/sarama.go similarity index 95% rename from pkg/internal/ebpf/gokafka/gokafka.go rename to pkg/internal/ebpf/sarama/sarama.go index 320d15ad5..aefb23023 100644 --- a/pkg/internal/ebpf/gokafka/gokafka.go +++ b/pkg/internal/ebpf/sarama/sarama.go @@ -10,7 +10,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package gokafka +package sarama import ( "context" @@ -29,8 +29,8 @@ import ( "github.com/grafana/beyla/pkg/internal/svc" ) -//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf ../../../../bpf/go_kafka.c -- -I../../../../bpf/headers -//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf_debug ../../../../bpf/go_kafka.c -- -I../../../../bpf/headers -DBPF_DEBUG +//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf ../../../../bpf/go_sarama.c -- -I../../../../bpf/headers +//go:generate $BPF2GO -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64 bpf_debug ../../../../bpf/go_sarama.c -- -I../../../../bpf/headers -DBPF_DEBUG type Tracer struct { log *slog.Logger @@ -42,7 +42,7 @@ type Tracer struct { } func New(cfg *beyla.Config, metrics imetrics.Reporter) *Tracer { - log := slog.With("component", "gokafka.Tracer") + log := slog.With("component", "sarama.Tracer") return &Tracer{ log: log, cfg: &cfg.EBPF, diff --git a/pkg/internal/goexec/offsets.json b/pkg/internal/goexec/offsets.json index 3514370ba..f48f8b2b5 100755 --- a/pkg/internal/goexec/offsets.json +++ b/pkg/internal/goexec/offsets.json @@ -162,6 +162,48 @@ ] } }, + "github.com/segmentio/kafka-go.Writer": { + "Topic": { + "versions": { + "oldest": "0.4.11", + "newest": "0.4.47" + }, + "offsets": [ + { + "offset": 16, + "since": "0.4.11" + } + ] + } + }, + "github.com/segmentio/kafka-go.reader": { + "topic": { + "versions": { + "oldest": "0.4.11", + "newest": "0.4.47" + }, + "offsets": [ + { + "offset": 64, + "since": "0.4.11" + } + ] + } + }, + "github.com/segmentio/kafka-go/protocol.Conn": { + "conn": { + "versions": { + "oldest": "0.4.11", + "newest": "0.4.47" + }, + "offsets": [ + { + "offset": 8, + "since": "0.4.11" + } + ] + } + }, "golang.org/x/net/http2.ClientConn": { "nextStreamID": { "versions": { diff --git a/pkg/internal/goexec/structmembers.go b/pkg/internal/goexec/structmembers.go index f6e16f470..55ef2218a 100644 
--- a/pkg/internal/goexec/structmembers.go +++ b/pkg/internal/goexec/structmembers.go @@ -221,6 +221,24 @@ var structMembers = map[string]structInfo{ "bw": "redis_conn_bw_pos", }, }, + "github.com/segmentio/kafka-go.Writer": { + lib: "github.com/segmentio/kafka-go", + fields: map[string]string{ + "Topic": "kafka_go_writer_topic_pos", + }, + }, + "github.com/segmentio/kafka-go/protocol.Conn": { + lib: "github.com/segmentio/kafka-go", + fields: map[string]string{ + "conn": "kafka_go_protocol_conn_pos", + }, + }, + "github.com/segmentio/kafka-go.reader": { + lib: "github.com/segmentio/kafka-go", + fields: map[string]string{ + "topic": "kafka_go_reader_topic_pos", + }, + }, } func structMemberOffsets(elfFile *elf.File) (FieldOffsets, error) { diff --git a/test/integration/components/gokafka-seg/Dockerfile b/test/integration/components/gokafka-seg/Dockerfile new file mode 100644 index 000000000..4d4cf69bb --- /dev/null +++ b/test/integration/components/gokafka-seg/Dockerfile @@ -0,0 +1,28 @@ +# Build the testserver binary +# Docker command must be invoked from the projec root directory +FROM golang:1.22 AS builder + +ARG TARGETARCH + +ENV GOARCH=$TARGETARCH + +WORKDIR /src + +# Copy the go manifests and source +COPY test/integration/components/gokafka-seg/ . + +# Build +RUN go build -o testserver main.go + +# Create final image from minimal + built binary +FROM debian:bookworm-slim + +WORKDIR / +COPY --from=builder /src/testserver . 
+USER 0:0 + +ENV kafkaURL="kafka:9092" +ENV topic="logging" +ENV groupID="1" + +CMD [ "/testserver" ] \ No newline at end of file diff --git a/test/integration/components/gokafka-seg/go.mod b/test/integration/components/gokafka-seg/go.mod new file mode 100644 index 000000000..a210dce3e --- /dev/null +++ b/test/integration/components/gokafka-seg/go.mod @@ -0,0 +1,10 @@ +module grafana.com/gokafka-seg + +go 1.22.2 + +require github.com/segmentio/kafka-go v0.4.47 + +require ( + github.com/klauspost/compress v1.15.9 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect +) diff --git a/test/integration/components/gokafka-seg/go.sum b/test/integration/components/gokafka-seg/go.sum new file mode 100644 index 000000000..3c1b808d0 --- /dev/null +++ b/test/integration/components/gokafka-seg/go.sum @@ -0,0 +1,68 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= +github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod 
h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/integration/components/gokafka-seg/main.go b/test/integration/components/gokafka-seg/main.go new file mode 100644 index 000000000..c3bb3047a --- /dev/null +++ b/test/integration/components/gokafka-seg/main.go @@ -0,0 +1,95 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + kafka "github.com/segmentio/kafka-go" +) + +func producerHandler(kafkaWriter *kafka.Writer) func(http.ResponseWriter, *http.Request) { + return http.HandlerFunc(func(wrt http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + if err != nil { + log.Fatalln(err) + } + msg := kafka.Message{ + Key: []byte(fmt.Sprintf("address-%s", req.RemoteAddr)), + Value: body, + } + err = kafkaWriter.WriteMessages(req.Context(), msg) + + if err != nil { + fmt.Printf("error %v\n", err) + } + }) +} + +func 
getKafkaWriter(kafkaURL, topic string) *kafka.Writer { + return &kafka.Writer{ + Addr: kafka.TCP(kafkaURL), + Topic: topic, + Balancer: &kafka.LeastBytes{}, + } +} + +func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader { + brokers := strings.Split(kafkaURL, ",") + return kafka.NewReader(kafka.ReaderConfig{ + Brokers: brokers, + GroupID: groupID, + Topic: topic, + MinBytes: 10e3, // 10KB + MaxBytes: 10e6, // 10MB + }) +} + +func main() { + // get kafka writer using environment variables. + kafkaURL := os.Getenv("kafkaURL") + topic := os.Getenv("topic") + + for { + client := kafka.Client{ + Addr: kafka.TCP(kafkaURL), + } + _, err := client.Metadata(context.Background(), &kafka.MetadataRequest{}) + if err == nil { + break + } + fmt.Printf("Waiting on kafka to start ...\n") + time.Sleep(2 * time.Second) + } + + kafkaWriter := getKafkaWriter(kafkaURL, topic) + defer kafkaWriter.Close() + + groupID := os.Getenv("groupID") + + reader := getKafkaReader(kafkaURL, topic, groupID) + defer reader.Close() + + go func() { + fmt.Println("start consuming ... !!") + for { + m, err := reader.ReadMessage(context.Background()) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("message at topic:%v partition:%v offset:%v %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) + } + }() + + // Add handle func for producer. + http.HandleFunc("/", producerHandler(kafkaWriter)) + + // Run the web server. 
+ fmt.Println("started test server on port 8080 ...") + log.Fatal(http.ListenAndServe(":8080", nil)) +} diff --git a/test/integration/components/gokafka-seg/run.sh b/test/integration/components/gokafka-seg/run.sh new file mode 100755 index 000000000..b7eaae8a5 --- /dev/null +++ b/test/integration/components/gokafka-seg/run.sh @@ -0,0 +1,6 @@ +export kafkaURL="127.0.0.1:9093" +export topic="logging" +export groupID="1" + +./gokafka +# curl http://localhost:8080/whatever diff --git a/test/integration/configs/instrumenter-config-http2.yml b/test/integration/configs/instrumenter-config-http2.yml index ddc06a55d..194f26994 100644 --- a/test/integration/configs/instrumenter-config-http2.yml +++ b/test/integration/configs/instrumenter-config-http2.yml @@ -1,7 +1,10 @@ routes: unmatched: path -otel_metrics_export: - endpoint: http://otelcol:4018 +prometheus_export: + port: 8999 + features: + - application + - application_process otel_traces_export: endpoint: http://jaeger:4318 discovery: diff --git a/test/integration/configs/otelcol-config.yml b/test/integration/configs/otelcol-config.yml index 32d73b23b..ea04ace07 100644 --- a/test/integration/configs/otelcol-config.yml +++ b/test/integration/configs/otelcol-config.yml @@ -2,7 +2,9 @@ receivers: otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 http: + endpoint: 0.0.0.0:4318 cors: allowed_origins: - "http://*" diff --git a/test/integration/docker-compose-1.16.yml b/test/integration/docker-compose-1.16.yml index dd846c534..6049dd9af 100644 --- a/test/integration/docker-compose-1.16.yml +++ b/test/integration/docker-compose-1.16.yml @@ -45,7 +45,7 @@ services: # OpenTelemetry Collector for Metrics. 
For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-1.17.yml b/test/integration/docker-compose-1.17.yml index 23e4a7dff..5b60b1592 100644 --- a/test/integration/docker-compose-1.17.yml +++ b/test/integration/docker-compose-1.17.yml @@ -50,7 +50,7 @@ services: # OpenTelemetry Collector for Metrics. For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -87,7 +87,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-client.yml b/test/integration/docker-compose-client.yml index a662ec459..9c88ac5fc 100644 --- a/test/integration/docker-compose-client.yml +++ b/test/integration/docker-compose-client.yml @@ -45,7 +45,7 @@ services: # OpenTelemetry Collector for Metrics. 
For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -82,7 +82,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-dotnet.yml b/test/integration/docker-compose-dotnet.yml index 02e806224..b69535adb 100644 --- a/test/integration/docker-compose-dotnet.yml +++ b/test/integration/docker-compose-dotnet.yml @@ -50,7 +50,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-elixir.yml b/test/integration/docker-compose-elixir.yml index d0db0522d..caf4db10e 100644 --- a/test/integration/docker-compose-elixir.yml +++ b/test/integration/docker-compose-elixir.yml @@ -46,7 +46,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-http2.yml b/test/integration/docker-compose-http2.yml index 28d837ef7..aac5b8f0c 100644 --- a/test/integration/docker-compose-http2.yml +++ b/test/integration/docker-compose-http2.yml @@ -20,7 +20,7 @@ services: ports: - "7373:7373" depends_on: - otelcol: + prometheus: condition: service_started jaeger: condition: service_started @@ -53,26 +53,39 @@ services: testclient: condition: service_started + # otelcol: + # image: grafana/agent + # container_name: demo-agent + # command: + # - run + # - /etc/agent/agent-config.river + # volumes: + # - ./configs/:/etc/agent + # environment: + # AGENT_MODE: "flow" + # ports: + # - "4017:4017" + # - "4018:4018" + 
# depends_on: + # prometheus: + # condition: service_started + # OpenTelemetry Collector - otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 - container_name: otel-col - deploy: - resources: - limits: - memory: 125M - restart: unless-stopped - command: [ "--config=/etc/otelcol-config/otelcol-config-4017.yml" ] - volumes: - - ./configs/:/etc/otelcol-config - ports: - - "4017" # OTLP over gRPC receiver - - "4018:4018" # OTLP over HTTP receiver - - "9464" # Prometheus exporter - - "8888" # metrics endpoint - depends_on: - prometheus: - condition: service_started + # otelcol: + # image: otel/opentelemetry-collector-contrib:0.103.0 + # container_name: otel-col + # restart: unless-stopped + # command: [ "--config=/etc/otelcol-config/otelcol-config-4017.yml" ] + # volumes: + # - ./configs/:/etc/otelcol-config + # ports: + # - "4017" # OTLP over gRPC receiver + # - "4018:4018" # OTLP over HTTP receiver + # - "9464" # Prometheus exporter + # - "8888" # metrics endpoint + # depends_on: + # prometheus: + # condition: service_started # Prometheus prometheus: @@ -80,7 +93,7 @@ services: container_name: prometheus command: - --storage.tsdb.retention.time=1m - - --config.file=/etc/prometheus/prometheus-config.yml + - --config.file=/etc/prometheus/prometheus-config-promscrape.yml - --storage.tsdb.path=/prometheus - --web.enable-lifecycle - --web.route-prefix=/ @@ -90,7 +103,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317:4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-java-host.yml b/test/integration/docker-compose-java-host.yml index 39f6f6b6f..82d059e59 100644 --- a/test/integration/docker-compose-java-host.yml +++ b/test/integration/docker-compose-java-host.yml @@ -42,7 +42,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: 
otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-java-pid.yml b/test/integration/docker-compose-java-pid.yml index e258c25bc..7dd4356b8 100644 --- a/test/integration/docker-compose-java-pid.yml +++ b/test/integration/docker-compose-java-pid.yml @@ -43,7 +43,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-java-system-wide.yml b/test/integration/docker-compose-java-system-wide.yml index 29afdf29c..74525e707 100644 --- a/test/integration/docker-compose-java-system-wide.yml +++ b/test/integration/docker-compose-java-system-wide.yml @@ -59,7 +59,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-java.yml b/test/integration/docker-compose-java.yml index e22ff802a..9e91be4e9 100644 --- a/test/integration/docker-compose-java.yml +++ b/test/integration/docker-compose-java.yml @@ -47,7 +47,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-multiexec.yml b/test/integration/docker-compose-multiexec.yml index e6d081837..925c9a598 100644 --- a/test/integration/docker-compose-multiexec.yml +++ b/test/integration/docker-compose-multiexec.yml @@ -146,7 +146,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -182,7 +182,7 @@ services: - "9090:9090" jaeger: - image: 
jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317:4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-netolly-direction.yml b/test/integration/docker-compose-netolly-direction.yml index 34016418b..f99303361 100644 --- a/test/integration/docker-compose-netolly-direction.yml +++ b/test/integration/docker-compose-netolly-direction.yml @@ -51,7 +51,7 @@ services: # OpenTelemetry Collector for Metrics. For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-netolly.yml b/test/integration/docker-compose-netolly.yml index 475852884..9044e7f32 100644 --- a/test/integration/docker-compose-netolly.yml +++ b/test/integration/docker-compose-netolly.yml @@ -42,7 +42,7 @@ services: # OpenTelemetry Collector for Metrics. For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-nodeclient.yml b/test/integration/docker-compose-nodeclient.yml index 50632bc6c..77c730674 100644 --- a/test/integration/docker-compose-nodeclient.yml +++ b/test/integration/docker-compose-nodeclient.yml @@ -51,7 +51,7 @@ services: # OpenTelemetry Collector for Metrics. 
For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -88,7 +88,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-nodejs.yml b/test/integration/docker-compose-nodejs.yml index 87cdde637..9c891309d 100644 --- a/test/integration/docker-compose-nodejs.yml +++ b/test/integration/docker-compose-nodejs.yml @@ -52,7 +52,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -87,7 +87,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-other-grpc.yml b/test/integration/docker-compose-other-grpc.yml index 930310f92..0f6a26175 100644 --- a/test/integration/docker-compose-other-grpc.yml +++ b/test/integration/docker-compose-other-grpc.yml @@ -65,7 +65,7 @@ services: # OpenTelemetry Collector for Metrics. 
For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -102,7 +102,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-python-sql.yml b/test/integration/docker-compose-python-sql.yml index cc6941c34..24c8b932f 100644 --- a/test/integration/docker-compose-python-sql.yml +++ b/test/integration/docker-compose-python-sql.yml @@ -63,7 +63,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -98,7 +98,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose-python.yml b/test/integration/docker-compose-python.yml index 2b11e3e39..ac43490ac 100644 --- a/test/integration/docker-compose-python.yml +++ b/test/integration/docker-compose-python.yml @@ -49,7 +49,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git a/test/integration/docker-compose-ruby.yml b/test/integration/docker-compose-ruby.yml index 98ed59257..60e7ba9f8 100644 --- a/test/integration/docker-compose-ruby.yml +++ b/test/integration/docker-compose-ruby.yml @@ -44,7 +44,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: diff --git 
a/test/integration/docker-compose-rust.yml b/test/integration/docker-compose-rust.yml index 5f08c2957..11daf5c68 100644 --- a/test/integration/docker-compose-rust.yml +++ b/test/integration/docker-compose-rust.yml @@ -48,7 +48,7 @@ services: # OpenTelemetry Collector otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -83,7 +83,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 002fb9c79..082cfc559 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -54,7 +54,7 @@ services: # OpenTelemetry Collector for Metrics. For Traces, we use directly Jaeger otelcol: - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 container_name: otel-col deploy: resources: @@ -91,7 +91,7 @@ services: - "9090:9090" jaeger: - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - "16686:16686" # Query frontend - "4317" # OTEL GRPC traces collector diff --git a/test/integration/k8s/daemonset/k8s_daemonset_main_test.go b/test/integration/k8s/daemonset/k8s_daemonset_main_test.go index 830984928..78d2699d9 100644 --- a/test/integration/k8s/daemonset/k8s_daemonset_main_test.go +++ b/test/integration/k8s/daemonset/k8s_daemonset_main_test.go @@ -29,7 +29,7 @@ func TestMain(m *testing.M) { docker.ImageBuild{Tag: "grpcpinger:dev", Dockerfile: k8s.DockerfilePinger}, docker.ImageBuild{Tag: "quay.io/prometheus/prometheus:v2.53.0"}, docker.ImageBuild{Tag: "otel/opentelemetry-collector-contrib:0.103.0"}, - docker.ImageBuild{Tag: "jaegertracing/all-in-one:1"}, + docker.ImageBuild{Tag: "jaegertracing/all-in-one:1.57"}, ); err != nil { 
slog.Error("can't build docker images", err) os.Exit(-1) @@ -43,7 +43,7 @@ func TestMain(m *testing.M) { kube.LocalImage("grpcpinger:dev"), kube.LocalImage("quay.io/prometheus/prometheus:v2.53.0"), kube.LocalImage("otel/opentelemetry-collector-contrib:0.103.0"), - kube.LocalImage("jaegertracing/all-in-one:1"), + kube.LocalImage("jaegertracing/all-in-one:1.57"), kube.Deploy(k8s.PathManifests+"/01-volumes.yml"), kube.Deploy(k8s.PathManifests+"/01-serviceaccount.yml"), kube.Deploy(k8s.PathManifests+"/02-prometheus-otelscrape.yml"), diff --git a/test/integration/k8s/daemonset_python/k8s_daemonset_main_test.go b/test/integration/k8s/daemonset_python/k8s_daemonset_main_test.go index bba513e69..c1e805dc6 100644 --- a/test/integration/k8s/daemonset_python/k8s_daemonset_main_test.go +++ b/test/integration/k8s/daemonset_python/k8s_daemonset_main_test.go @@ -28,7 +28,7 @@ func TestMain(m *testing.M) { docker.ImageBuild{Tag: "beyla:dev", Dockerfile: k8s.DockerfileBeyla}, docker.ImageBuild{Tag: "quay.io/prometheus/prometheus:v2.53.0"}, docker.ImageBuild{Tag: "otel/opentelemetry-collector-contrib:0.103.0"}, - docker.ImageBuild{Tag: "jaegertracing/all-in-one:1"}, + docker.ImageBuild{Tag: "jaegertracing/all-in-one:1.57"}, ); err != nil { slog.Error("can't build docker images", err) os.Exit(-1) @@ -41,7 +41,7 @@ func TestMain(m *testing.M) { kube.LocalImage("beyla:dev"), kube.LocalImage("quay.io/prometheus/prometheus:v2.53.0"), kube.LocalImage("otel/opentelemetry-collector-contrib:0.103.0"), - kube.LocalImage("jaegertracing/all-in-one:1"), + kube.LocalImage("jaegertracing/all-in-one:1.57"), kube.Deploy(k8s.PathManifests+"/01-volumes.yml"), kube.Deploy(k8s.PathManifests+"/01-serviceaccount.yml"), kube.Deploy(k8s.PathManifests+"/03-otelcol.yml"), diff --git a/test/integration/k8s/disable_informers/k8s_disable_informers_test.go b/test/integration/k8s/disable_informers/k8s_disable_informers_test.go index 69f1a0620..006c54b54 100644 --- 
a/test/integration/k8s/disable_informers/k8s_disable_informers_test.go +++ b/test/integration/k8s/disable_informers/k8s_disable_informers_test.go @@ -22,7 +22,7 @@ func TestMain(m *testing.M) { docker.ImageBuild{Tag: "httppinger:dev", Dockerfile: k8s.DockerfileHTTPPinger}, docker.ImageBuild{Tag: "quay.io/prometheus/prometheus:v2.53.0"}, docker.ImageBuild{Tag: "otel/opentelemetry-collector-contrib:0.103.0"}, - docker.ImageBuild{Tag: "jaegertracing/all-in-one:1"}, + docker.ImageBuild{Tag: "jaegertracing/all-in-one:1.57"}, ); err != nil { slog.Error("can't build docker images", err) os.Exit(-1) @@ -36,7 +36,7 @@ func TestMain(m *testing.M) { kube.LocalImage("httppinger:dev"), kube.LocalImage("quay.io/prometheus/prometheus:v2.53.0"), kube.LocalImage("otel/opentelemetry-collector-contrib:0.103.0"), - kube.LocalImage("jaegertracing/all-in-one:1"), + kube.LocalImage("jaegertracing/all-in-one:1.57"), kube.Deploy(k8s.PathManifests+"/01-volumes.yml"), kube.Deploy(k8s.PathManifests+"/01-serviceaccount.yml"), kube.Deploy(k8s.PathManifests+"/02-prometheus-otelscrape.yml"), diff --git a/test/integration/k8s/manifests/03-otelcol.yml b/test/integration/k8s/manifests/03-otelcol.yml index d1f760fbc..b44449ae4 100644 --- a/test/integration/k8s/manifests/03-otelcol.yml +++ b/test/integration/k8s/manifests/03-otelcol.yml @@ -32,7 +32,7 @@ spec: claimName: configs containers: - name: otelcol - image: otel/opentelemetry-collector-contrib:0.103.0 + image: otel/opentelemetry-collector-contrib:0.104.0 args: [ "--config=/etc/otelcol-config/otelcol-config.yml" ] volumeMounts: - mountPath: /etc/otelcol-config diff --git a/test/integration/k8s/manifests/04-jaeger.yml b/test/integration/k8s/manifests/04-jaeger.yml index 808daa7f3..eb48dfc98 100644 --- a/test/integration/k8s/manifests/04-jaeger.yml +++ b/test/integration/k8s/manifests/04-jaeger.yml @@ -25,7 +25,7 @@ metadata: spec: containers: - name: jaeger - image: jaegertracing/all-in-one:1 + image: jaegertracing/all-in-one:1.57 ports: - 
containerPort: 4317 name: otlp-grpc diff --git a/test/integration/k8s/otel/k8s_main_test.go b/test/integration/k8s/otel/k8s_main_test.go index ee0652f05..903328441 100644 --- a/test/integration/k8s/otel/k8s_main_test.go +++ b/test/integration/k8s/otel/k8s_main_test.go @@ -32,7 +32,7 @@ func TestMain(m *testing.M) { docker.ImageBuild{Tag: "httppinger:dev", Dockerfile: k8s.DockerfileHTTPPinger}, docker.ImageBuild{Tag: "quay.io/prometheus/prometheus:v2.53.0"}, docker.ImageBuild{Tag: "otel/opentelemetry-collector-contrib:0.103.0"}, - docker.ImageBuild{Tag: "jaegertracing/all-in-one:1"}, + docker.ImageBuild{Tag: "jaegertracing/all-in-one:1.57"}, ); err != nil { slog.Error("can't build docker images", err) os.Exit(-1) @@ -47,7 +47,7 @@ func TestMain(m *testing.M) { kube.LocalImage("httppinger:dev"), kube.LocalImage("quay.io/prometheus/prometheus:v2.53.0"), kube.LocalImage("otel/opentelemetry-collector-contrib:0.103.0"), - kube.LocalImage("jaegertracing/all-in-one:1"), + kube.LocalImage("jaegertracing/all-in-one:1.57"), kube.Deploy(k8s.PathManifests+"/01-volumes.yml"), kube.Deploy(k8s.PathManifests+"/01-serviceaccount.yml"), kube.Deploy(k8s.PathManifests+"/02-prometheus-otelscrape.yml"), diff --git a/test/integration/k8s/owners/k8s_owners_main_test.go b/test/integration/k8s/owners/k8s_owners_main_test.go index 4ca4645c7..b37863779 100644 --- a/test/integration/k8s/owners/k8s_owners_main_test.go +++ b/test/integration/k8s/owners/k8s_owners_main_test.go @@ -30,7 +30,7 @@ func TestMain(m *testing.M) { docker.ImageBuild{Tag: "beyla:dev", Dockerfile: k8s.DockerfileBeyla}, docker.ImageBuild{Tag: "quay.io/prometheus/prometheus:v2.53.0"}, docker.ImageBuild{Tag: "otel/opentelemetry-collector-contrib:0.103.0"}, - docker.ImageBuild{Tag: "jaegertracing/all-in-one:1"}, + docker.ImageBuild{Tag: "jaegertracing/all-in-one:1.57"}, ); err != nil { slog.Error("can't build docker images", err) os.Exit(-1) @@ -44,7 +44,7 @@ func TestMain(m *testing.M) { kube.LocalImage("grpcpinger:dev"), 
kube.LocalImage("quay.io/prometheus/prometheus:v2.53.0"), kube.LocalImage("otel/opentelemetry-collector-contrib:0.103.0"), - kube.LocalImage("jaegertracing/all-in-one:1"), + kube.LocalImage("jaegertracing/all-in-one:1.57"), kube.Deploy(k8s.PathManifests+"/01-volumes.yml"), kube.Deploy(k8s.PathManifests+"/01-serviceaccount.yml"), kube.Deploy(k8s.PathManifests+"/03-otelcol.yml"), diff --git a/test/oats/kafka/docker-compose-beyla-go-kafka-go.yml b/test/oats/kafka/docker-compose-beyla-go-kafka-go.yml new file mode 100644 index 000000000..0e663875c --- /dev/null +++ b/test/oats/kafka/docker-compose-beyla-go-kafka-go.yml @@ -0,0 +1,71 @@ +services: + zookeeper: + restart: always + container_name: kafka-like-zookeeper + image: docker.io/bitnami/zookeeper:3.8 + ports: + - "2181:2181" + volumes: + - "zookeeper-volume:/bitnami" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + kafka: + restart: always + container_name: kafka-like + image: docker.io/bitnami/kafka:3.3 + ports: + - "9093:9093" + - "9092:9092" + volumes: + - "kafka-volume:/bitnami" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT + - KAFKA_CFG_LISTENERS=CLIENT://:9092,EXTERNAL://:9093 + - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://kafka:9092,EXTERNAL://kafka:9093 + - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=CLIENT + depends_on: + - zookeeper + # Go HTTP server based on the Sarama Kafka examples + testserver: + build: + context: ../../.. + dockerfile: ./test/integration/components/gokafka-seg/Dockerfile + image: gokafka + ports: + - "8080:8080" + depends_on: + kafka: + condition: service_started + # eBPF auto instrumenter + autoinstrumenter: + build: + context: ../../.. 
+ dockerfile: ./test/integration/components/beyla/Dockerfile + command: + - --config=/configs/instrumenter-config-traces.yml + volumes: + - {{ .ConfigDir }}:/configs + - ./testoutput/run:/var/run/beyla + - ../../../testoutput:/coverage + privileged: true # in some environments (not GH Pull Requests) you can set it to false and then cap_add: [ SYS_ADMIN ] + network_mode: "service:testserver" + pid: "service:testserver" + environment: + GOCOVERDIR: "/coverage" + BEYLA_PRINT_TRACES: "true" + BEYLA_OPEN_PORT: {{ .ApplicationPort }} + BEYLA_SERVICE_NAMESPACE: "integration-test" + BEYLA_METRICS_INTERVAL: "10ms" + BEYLA_BPF_BATCH_TIMEOUT: "10ms" + BEYLA_LOG_LEVEL: "DEBUG" + BEYLA_BPF_DEBUG: "true" + OTEL_EXPORTER_OTLP_ENDPOINT: "http://collector:4318" + depends_on: + testserver: + condition: service_started +volumes: + kafka-volume: + zookeeper-volume: \ No newline at end of file diff --git a/test/oats/kafka/docker-compose-beyla-go-kafka.yml b/test/oats/kafka/docker-compose-beyla-go-kafka-sarama.yml similarity index 100% rename from test/oats/kafka/docker-compose-beyla-go-kafka.yml rename to test/oats/kafka/docker-compose-beyla-go-kafka-sarama.yml diff --git a/test/oats/kafka/yaml/oats_go_kafka-go.yaml b/test/oats/kafka/yaml/oats_go_kafka-go.yaml new file mode 100644 index 000000000..cc8d9377f --- /dev/null +++ b/test/oats/kafka/yaml/oats_go_kafka-go.yaml @@ -0,0 +1,39 @@ +docker-compose: + generator: generic + files: + - ../docker-compose-beyla-go-kafka-go.yml +input: + - path: '/ping' + +interval: 500ms +expected: + traces: + - traceql: '{ .messaging.operation.type = "publish" && .messaging.destination.name="logging"}' + spans: + - name: 'logging publish' + attributes: + messaging.destination.name: logging + messaging.operation.type: publish + messaging.system: kafka + server.port: "9092" + - traceql: '{ .messaging.operation.type = "process" && .messaging.destination.name="logging"}' + spans: + - name: 'logging process' + attributes: + messaging.destination.name: 
logging + messaging.operation.type: process + messaging.system: kafka + server.port: "9092" + metrics: + - promql: 'messaging_publish_duration_count{messaging_system="kafka", messaging_destination_name="logging"}' + value: "> 0" + - promql: 'messaging_publish_duration_bucket{le="0"}' + value: "== 0" + - promql: 'messaging_publish_duration_bucket{le="10"}' + value: "> 0" + - promql: 'messaging_process_duration_count{messaging_system="kafka", messaging_destination_name="logging"}' + value: "> 0" + - promql: 'messaging_process_duration_bucket{le="0"}' + value: "== 0" + - promql: 'messaging_process_duration_bucket{le="10"}' + value: "> 0" diff --git a/test/oats/kafka/yaml/oats_go_kafka.yaml b/test/oats/kafka/yaml/oats_go_kafka-sarama.yaml similarity index 97% rename from test/oats/kafka/yaml/oats_go_kafka.yaml rename to test/oats/kafka/yaml/oats_go_kafka-sarama.yaml index 5bc5ef417..4e9899d3f 100644 --- a/test/oats/kafka/yaml/oats_go_kafka.yaml +++ b/test/oats/kafka/yaml/oats_go_kafka-sarama.yaml @@ -1,7 +1,7 @@ docker-compose: generator: generic files: - - ../docker-compose-beyla-go-kafka.yml + - ../docker-compose-beyla-go-kafka-sarama.yml input: - path: '/?data'