Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NF pool #240

Open
wants to merge 24 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions onvm/onvm_mgr/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,10 @@ master_thread_main(void) {
rte_atomic16_set(nf_wakeup_infos[i].shm_server, 0);
sem_post(nf_wakeup_infos[i].mutex);
}
if (nfs[i].pool_status.pool_sleep_state == 1) {
nfs[i].pool_status.pool_sleep_state = 0;
sem_post(nf_pool_wakeup_infos[i].mutex);
}
}

/* Wait to process all exits */
Expand All @@ -171,6 +175,12 @@ master_thread_main(void) {
}
}

/* Clean up the nf pool structs */
for (i = 0; i < MAX_NFS; i++) {
sem_close(nf_pool_wakeup_infos[i].mutex);
sem_unlink(nf_pool_wakeup_infos[i].sem_name);
}

RTE_LOG(INFO, APP, "Core %d: Master thread done\n", rte_lcore_id());
}

Expand Down
30 changes: 29 additions & 1 deletion onvm/onvm_mgr/onvm_init.c
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ struct port_info *ports = NULL;
struct core_status *cores = NULL;
struct onvm_configuration *onvm_config = NULL;
struct nf_wakeup_info *nf_wakeup_infos = NULL;
struct nf_wakeup_info *nf_pool_wakeup_infos = NULL;

struct rte_mempool *pktmbuf_pool;
struct rte_mempool *nf_init_cfg_pool;
Expand Down Expand Up @@ -86,6 +87,9 @@ init_port(uint8_t port_num);
static void
init_shared_sem(void);

static void
init_pool_sem(void);

static int
init_info_queue(void);

Expand Down Expand Up @@ -244,6 +248,9 @@ init(int argc, char *argv[]) {
/* initialise the shared memory for shared core mode */
init_shared_sem();

/* Initialize mutexes for the NF pool structures */
init_pool_sem();

/*initialize a default service chain*/
default_chain = onvm_sc_create();
retval = onvm_sc_append_entry(default_chain, ONVM_NF_ACTION_TONF, 1);
Expand All @@ -263,7 +270,6 @@ init(int argc, char *argv[]) {
onvm_sc_print(default_chain);

onvm_flow_dir_init();

return 0;
}

Expand Down Expand Up @@ -446,6 +452,28 @@ init_shared_sem(void) {
}
}

static void
init_pool_sem(void) {
        uint16_t i;
        sem_t *mutex;
        char *sem_name;

        /*
         * Allocate one wakeup-info slot per possible NF instance.
         * rte_calloc takes (type, num, size, align): pass the element
         * count and element size in that order (the previous call had
         * them swapped; it only worked because the product is the same).
         */
        nf_pool_wakeup_infos = rte_calloc("POOL_SHM_INFOS", MAX_NFS, sizeof(struct nf_wakeup_info), 0);
        if (nf_pool_wakeup_infos == NULL) {
                fprintf(stderr, "can not allocate memory for nf pool wakeup infos\n");
                exit(1);
        }

        for (i = 0; i < MAX_NFS; i++) {
                /* Per-instance named semaphore, e.g. "nf_pool_0" */
                sem_name = rte_malloc(NULL, sizeof(char) * 64, 0);
                if (sem_name == NULL) {
                        fprintf(stderr, "can not allocate memory for semaphore name for NF %d\n", i);
                        exit(1);
                }
                snprintf(sem_name, 64, "nf_pool_%d", i);
                nf_pool_wakeup_infos[i].sem_name = sem_name;

                /*
                 * Mode 0666: read/write for everyone. The previous mode
                 * 06666 additionally set the set-uid/set-gid bits, which
                 * is not intended for a semaphore.
                 */
                mutex = sem_open(sem_name, O_CREAT, 0666, 0);
                if (mutex == SEM_FAILED) {
                        fprintf(stderr, "can not create semaphore for NF %d\n", i);
                        sem_unlink(sem_name);
                        exit(1);
                }
                nf_pool_wakeup_infos[i].mutex = mutex;
        }
}

/**
* Allocate a rte_ring for newly created NFs
*/
Expand Down
1 change: 1 addition & 0 deletions onvm/onvm_mgr/onvm_init.h
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ extern uint8_t ONVM_NF_SHARE_CORES;

/* For handling shared core logic */
extern struct nf_wakeup_info *nf_wakeup_infos;
extern struct nf_wakeup_info *nf_pool_wakeup_infos;

/**********************************Functions**********************************/

Expand Down
33 changes: 33 additions & 0 deletions onvm/onvm_mgr/onvm_nf.c
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,16 @@ onvm_nf_relocate_nf(uint16_t nf, uint16_t new_core);
static void
onvm_nf_init_lpm_region(struct lpm_request *req_lpm);

/*
 * Function that initializes an rte_ring on behalf of an NF.
 *
 * Input  : the address of a ring_request struct
 * Output : none; the request's status field is set to 0 on success
 *          or -1 on failure
 */
static void
onvm_nf_init_single_ring(struct ring_request *ring_req);

/*
* Function that initializes a hashtable for a flow_table struct
*
Expand Down Expand Up @@ -173,6 +183,8 @@ onvm_nf_check_status(void) {
struct onvm_nf_init_cfg *nf_init_cfg;
struct lpm_request *req_lpm;
struct ft_request *ft;
struct ring_request *ring_req;
struct id_request *id_req;
uint16_t stop_nf_id;
int num_msgs = rte_ring_count(incoming_msg_queue);

Expand All @@ -195,12 +207,21 @@ onvm_nf_check_status(void) {
ft = (struct ft_request *) msg->msg_data;
onvm_nf_init_ft(ft);
break;
case MSG_REQUEST_RING:
ring_req = (struct ring_request *) msg->msg_data;
onvm_nf_init_single_ring(ring_req);
break;
case MSG_NF_STARTING:
nf_init_cfg = (struct onvm_nf_init_cfg *)msg->msg_data;
if (onvm_nf_start(nf_init_cfg) == 0) {
onvm_stats_gen_event_nf_info("NF Starting", &nfs[nf_init_cfg->instance_id]);
}
break;
case MSG_REQUEST_ID:
id_req = (struct id_request *) msg->msg_data;
id_req->instance_id = next_instance_id;
id_req->status = 0;
break;
case MSG_NF_READY:
nf = (struct onvm_nf *)msg->msg_data;
if (onvm_nf_ready(nf) == 0) {
Expand Down Expand Up @@ -291,6 +312,7 @@ onvm_nf_start(struct onvm_nf_init_cfg *nf_init_cfg) {
return 1;
}

spawned_nf->pool_status.pool_sleep_state = 0;
spawned_nf->instance_id = nf_id;
spawned_nf->service_id = nf_init_cfg->service_id;
spawned_nf->status = NF_STARTING;
Expand Down Expand Up @@ -454,6 +476,17 @@ onvm_nf_init_ft(struct ft_request *ft) {
}
}

static void
onvm_nf_init_single_ring(struct ring_request *ring_req) {
struct rte_ring *ring;
ring = rte_ring_create(ring_req->name, ring_req->count, rte_socket_id(), RING_F_SC_DEQ);
if (ring) {
ring_req->status = 0;
} else {
ring_req->status = -1;
}
}

inline int
onvm_nf_relocate_nf(uint16_t dest, uint16_t new_core) {
uint16_t *msg_data;
Expand Down
6 changes: 6 additions & 0 deletions onvm/onvm_mgr/onvm_stats.c
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,7 @@ onvm_stats_display_nfs(unsigned difftime, uint8_t verbosity_level) {

/* For same service id TX/RX stats */
uint8_t print_total_stats = 0;
uint64_t pool_count = 0;
uint64_t rx_for_service[MAX_SERVICES];
uint64_t tx_for_service[MAX_SERVICES];
uint64_t rx_drop_for_service[MAX_SERVICES];
Expand Down Expand Up @@ -445,6 +446,10 @@ onvm_stats_display_nfs(unsigned difftime, uint8_t verbosity_level) {
for (i = 0; i < MAX_NFS; i++) {
if (!onvm_nf_is_valid(&nfs[i]))
continue;
if (nfs[i].pool_status.pool_sleep_state == 1) {
pool_count++;
continue;
}
const uint64_t rx = nfs[i].stats.rx;
const uint64_t rx_drop = nfs[i].stats.rx_drop;
const uint64_t tx = nfs[i].stats.tx;
Expand Down Expand Up @@ -572,6 +577,7 @@ onvm_stats_display_nfs(unsigned difftime, uint8_t verbosity_level) {
onvm_stats_display_client_wakeup_thread_context(difftime);
}

fprintf(stats_out, "\n\nNumber of NF's in pool: %ld\n--------------------------\n", pool_count);
}

/***************************Helper functions**********************************/
Expand Down
51 changes: 51 additions & 0 deletions onvm/onvm_nflib/onvm_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,7 @@ struct onvm_nf {
uint16_t instance_id;
uint16_t service_id;
uint8_t status;
uint8_t pool_nf;
char *tag;
/* Pointer to NF defined state data */
void *data;
Expand Down Expand Up @@ -324,6 +325,17 @@ struct onvm_nf {
/* Mutex for NF sem_wait */
sem_t *nf_mutex;
} shared_core;

struct {
/*
* Sleep state to track state of whether the NF is active
* Same logic as shared core
*/
volatile int16_t pool_sleep_state;
sem_t *pool_mutex;
const char *pool_mutex_name;
char *binary_executable;
} pool_status;
};

/*
Expand All @@ -342,6 +354,14 @@ struct onvm_nf_init_cfg {
uint16_t pkt_limit;
};

/* Context describing a pool of pre-launched NF instances. */
struct onvm_nf_pool_ctx {
        struct rte_ring *pool_ring;    /* ring holding pooled NF instances */
        const char *args;              /* argument string used to launch the NF */
        const char *binary_executable; /* path to the NF binary */
        const char *nf_name;           /* human-readable NF name */
        unsigned refill;               /* presumably the pool refill count — TODO confirm semantics */
};

/*
* Define a structure to describe a service chain entry
*/
Expand Down Expand Up @@ -372,6 +392,34 @@ struct ft_request {
int status;
};

/* NF -> manager request (MSG_REQUEST_RING) asking the manager to
 * create an rte_ring on the NF's behalf. */
struct ring_request {
        char *name;     /* name passed to rte_ring_create() */
        unsigned count; /* ring size (number of slots) */
        unsigned flags; /* requested ring flags — NOTE(review): the manager currently hardcodes RING_F_SC_DEQ and ignores this field */
        int status;     /* written by the manager: 0 on success, -1 on failure */
};

/* NF -> manager request (MSG_REQUEST_ID) for the next instance id. */
struct id_request {
        int instance_id; /* filled in by the manager with next_instance_id */
        int status;      /* set to 0 by the manager once handled */
};

/* Command-line arguments (as strings) for the simple_forward NF. */
struct simple_forward_args {
        const char *service_id;     /* service id to register under */
        const char *destination_id; /* destination service to forward to */
        struct {
                const char *print_delay; /* optional stats print interval */
        } optional_args;
};

/* Command-line arguments (as strings) for the aes_decrypt NF. */
struct aes_decrypt_args {
        const char *service_id;     /* service id to register under */
        const char *destination_id; /* destination service to forward to */
        struct {
                const char *print_delay; /* optional stats print interval */
        } optional_args;
};

/* define common names for structures shared between server and NF */
#define MP_NF_RXQ_NAME "MProc_Client_%u_RX"
#define MP_NF_TXQ_NAME "MProc_Client_%u_TX"
Expand All @@ -390,6 +438,7 @@ struct ft_request {
#define _NF_MSG_QUEUE_NAME "NF_%u_MSG_QUEUE"
#define _NF_MEMPOOL_NAME "NF_INFO_MEMPOOL"
#define _NF_MSG_POOL_NAME "NF_MSG_MEMPOOL"
#define _NF_POOL_NAME "onvm_nf_pool"

/* interrupt semaphore specific updates */
#define SHMSZ 4 // size of shared memory segement (page_size)
Expand All @@ -411,6 +460,8 @@ struct ft_request {
#define NF_CORE_BUSY 12 // The manually selected core is busy
#define NF_WAITING_FOR_LPM 13 // NF is waiting for a LPM request to be fulfilled
#define NF_WAITING_FOR_FT 14 // NF is waiting for a flow-table request to be fulfilled
#define NF_WAITING_FOR_RING 15 // NF is waiting for a ring request to be fulfilled
#define NF_WAITING_FOR_INSTANCE_ID 16 // NF is waiting for the manager to return the next instance ID

#define NF_NO_ID -1

Expand Down
1 change: 1 addition & 0 deletions onvm/onvm_nflib/onvm_includes.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@
#include <rte_ring.h>
#include <rte_string_fns.h>
#include <rte_tailq.h>
#include <rte_jhash.h>

/******************************Internal headers*******************************/

Expand Down
2 changes: 2 additions & 0 deletions onvm/onvm_nflib/onvm_msg_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,8 @@
#define MSG_REQUEST_LPM_REGION 7
#define MSG_CHANGE_CORE 8
#define MSG_REQUEST_FT 9
#define MSG_REQUEST_RING 10
#define MSG_REQUEST_ID 11

struct onvm_nf_msg {
uint8_t msg_type; /* Constant saying what type of message is */
Expand Down
Loading