Skip to content

Commit

Permalink
Upgrade to Envoy v1.15.0
Browse files Browse the repository at this point in the history
  • Loading branch information
chintan8saaras committed Aug 11, 2020
1 parent 78b1646 commit b55dfd8
Show file tree
Hide file tree
Showing 8 changed files with 78 additions and 60 deletions.
2 changes: 1 addition & 1 deletion enroute-dp/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM envoyproxy/envoy:v1.14.1
FROM envoyproxy/envoy:v1.15.0
WORKDIR /enroute
COPY enroute /enroute
COPY redis-server /bin
Expand Down
4 changes: 2 additions & 2 deletions enroute-dp/internal/envoy/listener_filter.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,7 @@ func updateHttpVHFilters(listener_filters *[]*http.HttpFilter,
// Correctly order the HttpFilters from the map constructed in previous step

// Lua
if hf, ok := m["envoy.lua"]; ok {
if hf, ok := m[wellknown.Lua]; ok {
http_filters = append(http_filters, hf)
}

Expand All @@ -246,7 +246,7 @@ func updateHttpVHFilters(listener_filters *[]*http.HttpFilter,
}

// Rate Limit
if hf, ok := m["envoy.rate_limit"]; ok {
if hf, ok := m[wellknown.HTTPRateLimit]; ok {
http_filters = append(http_filters, hf)
}

Expand Down
34 changes: 0 additions & 34 deletions enroute-dp/internal/envoy/route_rl.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,22 +17,6 @@ func rateLimitActionSpecifierHeaderValueMatch() *envoy_api_v2_route.RateLimit_Ac
}
}

//func rateLimitActionSpecifierRequestHeaders(header_name, descriptor_key string) *envoy_api_v2_route.RateLimit_Action_RequestHeaders_ {
// return &envoy_api_v2_route.RateLimit_Action_RequestHeaders_{
// RequestHeaders: &envoy_api_v2_route.RateLimit_Action_RequestHeaders{
// HeaderName: "user-agent",
// DescriptorKey: "useragent",
// },
// }
//}
//func rateLimitActionSpecifierGenericKey(generic_key string) *envoy_api_v2_route.RateLimit_Action_GenericKey_ {
// return &envoy_api_v2_route.RateLimit_Action_GenericKey_{
// GenericKey: &envoy_api_v2_route.RateLimit_Action_GenericKey{
// DescriptorValue: "default",
// },
// }
//}

func rateLimitActionSpecifierRequestHeaders(header_name, descriptor_key string) *envoy_api_v2_route.RateLimit_Action_RequestHeaders_ {
return &envoy_api_v2_route.RateLimit_Action_RequestHeaders_{
RequestHeaders: &envoy_api_v2_route.RateLimit_Action_RequestHeaders{
Expand Down Expand Up @@ -160,24 +144,6 @@ func rateLimits(rl *dag.RouteFilter) []*envoy_api_v2_route.RateLimit {
}

return rrl_slice

// return []*envoy_api_v2_route.RateLimit{
// {
// Stage: u32(0),
// Actions: []*envoy_api_v2_route.RateLimit_Action{
// rateLimitAction3(),
// rateLimitAction(),
// },
// },
// {
// Stage: u32(0),
// Actions: []*envoy_api_v2_route.RateLimit_Action{
// rateLimitAction(),
// },
// },
// }

// return []*envoy_api_v2_route.RateLimit{}
}

func SetupRouteRateLimits(r *dag.Route, ra *envoy_api_v2_route.RouteAction) {
Expand Down
58 changes: 52 additions & 6 deletions enroute-dp/internal/grpc/server.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright(c) 2018-2020 Saaras Inc.


// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
Expand Down Expand Up @@ -66,6 +65,22 @@ func NewAPI(log logrus.FieldLogger, resources map[string]Resource) *grpc.Server
return g
}

// NewAPIRateLimit returns a gRPC server that serves the Envoy
// RateLimitService API (ShouldRateLimit). The log and c parameters are
// accepted for interface symmetry with NewAPI and are currently unused
// in this function body — TODO confirm whether they are needed.
func NewAPIRateLimit(log logrus.FieldLogger, c chan string) *grpc.Server {
opts := []grpc.ServerOption{
// By default the Go grpc library defaults to a value of ~100 streams per
// connection. This number is likely derived from the HTTP/2 spec:
// https://http2.github.io/http2-spec/#SettingValues
// We need to raise this value because Envoy will open one EDS stream per
// CDS entry. There doesn't seem to be a penalty for increasing this value,
// so set the limit similar to envoyproxy/go-control-plane#70.
grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams),
}
g := grpc.NewServer(opts...)
// Register the rate limit service implementation so Envoy's rate limit
// filter can reach ShouldRateLimit over this server.
rls := &ratelimitServer{}
rl.RegisterRateLimitServiceServer(g, rls)
return g
}

// grpcServer implements the LDS, RDS, CDS, and EDS, gRPC endpoints.
type grpcServer struct {
xdsHandler
Expand Down Expand Up @@ -148,12 +163,47 @@ func (s *ratelimitServer) rateLimitDescriptor() *rl.RateLimitResponse_Descriptor
return &rl.RateLimitResponse_DescriptorStatus{Code: rl.RateLimitResponse_OK, CurrentLimit: l, LimitRemaining: 5}
}

//func (this *rateLimitServer) DoLimit(
// request *rl.RateLimitRequest) []*rl.RateLimitResponse_DescriptorStatus {
//}

//func (this *service) shouldRateLimitWorker(
// ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse {
//
// checkServiceErr(request.Domain != "", "rate limit domain must not be empty")
// checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty")
//
// snappedConfig := this.GetCurrentConfig()
// checkServiceErr(snappedConfig != nil, "no rate limit configuration loaded")
//
// limitsToCheck := make([]*config.RateLimit, len(request.Descriptors))
// for i, descriptor := range request.Descriptors {
// limitsToCheck[i] = snappedConfig.GetLimit(ctx, request.Domain, descriptor)
// }
//
// responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck)
// assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses))
//
// response := &pb.RateLimitResponse{}
// response.Statuses = make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors))
// finalCode := pb.RateLimitResponse_OK
// for i, descriptorStatus := range responseDescriptorStatuses {
// response.Statuses[i] = descriptorStatus
// if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT {
// finalCode = descriptorStatus.Code
// }
// }
//
// response.OverallCode = finalCode
// return response
//}

func (s *ratelimitServer) ShouldRateLimit(c context.Context, req *rl.RateLimitRequest) (*rl.RateLimitResponse, error) {
fmt.Printf("Received rate limit request +[%v]\n", req)
response := &rl.RateLimitResponse{}
response.Statuses = make([]*rl.RateLimitResponse_DescriptorStatus, len(req.Descriptors))
finalCode := rl.RateLimitResponse_OK
for i, _ := range req.Descriptors {
for i := range req.Descriptors {
descriptorStatus := s.rateLimitDescriptor()
response.Statuses[i] = descriptorStatus
if descriptorStatus.Code == rl.RateLimitResponse_OVER_LIMIT {
Expand All @@ -164,7 +214,3 @@ func (s *ratelimitServer) ShouldRateLimit(c context.Context, req *rl.RateLimitRe
response.OverallCode = finalCode
return response, nil
}

func NewAPIRateLimit(log logrus.FieldLogger, c chan string) *grpc.Server {
return nil
}
22 changes: 11 additions & 11 deletions enroute-dp/saaras/saarascloudcache.go
Original file line number Diff line number Diff line change
Expand Up @@ -270,17 +270,17 @@ func saaras_upstream__to__v1_ep(mss *SaarasMicroService2) *v1.Endpoints {
}
ep_subsets_ports = append(ep_subsets_ports, ep_subsets_port)

// We don't create endpoints if the upstream IP is not a valid IP address
// This is OK.
// The way this works is -
// If we can parse a valid IP, we create an endpoint and hand it to EDS. The cluster gets an endpoint
// If we cannot parse an IP, we don't create an endpoint. EDS does not provide it to cluster. In such a case,
// the cluster creation logic checks and programs external name for that cluster with with the endpoint with STRICT_DNS
// Hence it an endpoint is not required in such cases. Function saaras_ir_slice__to__v1b1_service_map incorporates this logic
// We don't create endpoints if the upstream IP is not a valid IP address
// This is OK.
// The way this works is -
// If we can parse a valid IP, we create an endpoint and hand it to EDS. The cluster gets an endpoint
// If we cannot parse an IP, we don't create an endpoint. EDS does not provide it to cluster. In such a case,
// the cluster creation logic checks and programs external name for that cluster with the endpoint with STRICT_DNS
// Hence an endpoint is not required in such cases. Function saaras_ir_slice__to__v1b1_service_map incorporates this logic
if net.ParseIP(mss.Upstream.Upstream_ip) == nil {
ips, err := net.LookupIP(mss.Upstream.Upstream_ip)
if err != nil {
// TODO: Add log here
// TODO: Add log here
//log.Debugf(" Upstream [%s] not parsed as IP", mss.Upstream.Upstream_ip)
}
for _, ip := range ips {
Expand Down Expand Up @@ -537,9 +537,9 @@ func (sac *SaarasCloudCache) update__v1__vf_cache(v1b1_vf_map *map[string]*v1bet
for _, cloud_vf := range *v1b1_vf_map {
if cached_vf, ok := sac.vf[cloud_vf.ObjectMeta.Namespace+cloud_vf.ObjectMeta.Name+cloud_vf.Spec.Type]; ok {
if apiequality.Semantic.DeepEqual(cached_vf, cloud_vf) {
log.Infof("update__v1__vf_cache() - RF [%s] on saaras cloud same as cache\n", cloud_vf.ObjectMeta.Name)
log.Infof("update__v1__vf_cache() - HTTPFilter [%s] on saaras cloud same as cache\n", cloud_vf.ObjectMeta.Name)
} else {
log.Infof("update__v1__vf_cache() - RF [%s] on saaras cloud different from cache - OnUpdate()\n", cloud_vf.ObjectMeta.Name)
log.Infof("update__v1__vf_cache() - HTTPFilter [%s] on saaras cloud different from cache - OnUpdate()\n", cloud_vf.ObjectMeta.Name)
sac.vf[cloud_vf.ObjectMeta.Namespace+cloud_vf.ObjectMeta.Name+cloud_vf.Spec.Type] = cloud_vf
reh.OnUpdate(cached_vf, cloud_vf)
}
Expand All @@ -548,7 +548,7 @@ func (sac *SaarasCloudCache) update__v1__vf_cache(v1b1_vf_map *map[string]*v1bet
sac.vf = make(map[string]*v1beta1.HttpFilter, 0)
}
sac.vf[cloud_vf.ObjectMeta.Namespace+cloud_vf.ObjectMeta.Name+cloud_vf.Spec.Type] = cloud_vf
log.Infof("update__v1__vf_cache() - RF [%s] on saaras cloud not in cache - OnAdd()\n", cloud_vf.ObjectMeta.Name)
log.Infof("update__v1__vf_cache() - HTTPFilter [%s] on saaras cloud not in cache - OnAdd()\n", cloud_vf.ObjectMeta.Name)
reh.OnAdd(cloud_vf)
}
}
Expand Down
9 changes: 6 additions & 3 deletions packaging/enroute-cp/Dockerfile.cp
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,18 @@
# (3.2.1) Migration script runs hasura temporarily and then kills it.
# (3.3) Start hasura

FROM ubuntu:16.04
FROM ubuntu:18.04

WORKDIR /bin

# 1.1
RUN apt-get update
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
RUN apt-get update && apt-get install -y gnupg2 libicu55
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main" > /etc/apt/sources.list.d/pgdg.list
RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-11 postgresql-client-11 postgresql-contrib-11
RUN apt-get update && apt-get install -y software-properties-common postgresql-11 postgresql-client-11 postgresql-contrib-11

#1.2
RUN apt-get update && apt-get install -y supervisor vim netcat net-tools sed
Expand Down
9 changes: 6 additions & 3 deletions packaging/enroute-cp/Dockerfile.gw
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,18 @@
# (3.2.1) Migration script runs hasura temporarily and then kills it.
# (3.3) Start hasura

FROM ubuntu:16.04
FROM ubuntu:18.04

WORKDIR /bin

# 1.1
RUN apt-get update
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main"
RUN apt-get update && apt-get install -y gnupg2 libicu55
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main" > /etc/apt/sources.list.d/pgdg.list
RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-11 postgresql-client-11 postgresql-contrib-11
RUN apt-get update && apt-get install -y software-properties-common postgresql-11 postgresql-client-11 postgresql-contrib-11

#1.2
RUN apt-get update && apt-get install -y supervisor vim netcat net-tools sed curl jq
Expand Down
Binary file modified packaging/enroute-cp/envoy
Binary file not shown.

0 comments on commit b55dfd8

Please sign in to comment.