Standalone C library for training neural networks.
- Multiple built-in activation functions (ReLU, Sigmoid, Tanh, Linear)
- Support for implementing custom activation functions
- Multiple built-in optimizers (SGD, Adam)
- Support for implementing custom optimizers
- Multiple built-in loss functions (MSE, BCE)
- Support for implementing custom loss functions
- Configurable training hyperparameters
- Model save and load support (binary format)
- Minimal dependencies — pure ANSI C
- Arena-based memory allocation for faster and safer memory management
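
The quick-start below defines a small network, trains it on the bundled cubic-function dataset, runs a few predictions, and saves the model:
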
```c
#include <stdio.h> // For printf
#include "neu.h"   // Include the library

int main(void) {
    // Initialize the library's internal memory and global state
    neu_init();

    // Define the architecture of the neural network:
    // 1 input, 1 hidden layer with 8 neurons, and 1 output
    size_t arch[] = {1, 8, 1};
    size_t depth = sizeof(arch) / sizeof(size_t);

    // Create the neural network with the specified architecture
    neu_NeuralNetwork* nn = neu_nn_create(depth, arch);

    // Set the activation functions for each layer (excluding the input layer):
    // sigmoid for the hidden layer and linear for the output layer
    neu_nn_set_act_ar(2, nn, (neu_Activation[]) {
        NEU_SIGMOID_ACT,
        NEU_LINEAR_ACT
    });

    // Set the optimizer to Adam for training
    neu_nn_set_opt(nn, NEU_ADAM_OPT);

    // Set hyperparameters for training; beta_1, beta_2, and epsilon are
    // Adam's moment-decay rates and numerical-stability constant
    neu_nn_set_hyper_params(nn, (neu_HyperParams){
        .learning_rate = 0.01,
        .beta_1 = 0.9,
        .beta_2 = 0.999,
        .epsilon = 1e-8,
        .seed = 42
    });

    // Initialize the neural network's weights and internal state
    neu_nn_init(nn);

    // Load the training dataset from file: cubic_function.txt
    // Assumes each line has 1 input and 1 output value
    neu_DataSet* ds = neu_ds_load("datasets/cubic_function.txt", (neu_DataSetLoad){
        .inputs = 1,
        .outputs = 1
    });

    // Train the neural network using the dataset
    // Train for 5000 epochs and print status every 500 epochs
    neu_nn_train(nn, ds, (neu_TrainOptions) {
        .epochs = 5000,
        .print_every = 500
    });

    // Array of test input values for inference
    double test_points[] = {10.0, 30.0, 50.0, 80.0, 100.0};
    size_t num_tests = sizeof(test_points) / sizeof(double);

    printf("\nPredictions:\n");

    // Create reusable input and output vectors for prediction
    neu_Vector* input_vec = neu_vec_create(1, G_ARENA);
    neu_Vector* output_vec = neu_vec_create(1, G_ARENA);

    for (size_t i = 0; i < num_tests; ++i) {
        double x = test_points[i];

        // Set the input vector with the current test value
        neu_vec_set_ar(1, input_vec, (double[]){x});

        // Run inference with the trained model
        neu_nn_predict(nn, ds, (neu_PredictOptions) {
            .input = input_vec,
            .output = output_vec
        });

        // Fetch and print the predicted output
        double y = neu_vec_get(0, output_vec);
        printf("x = %.2f, predicted y = %.2f\n", x, y);
    }

    // Save the trained model to a file for later reuse
    neu_nn_save(nn, "my_model.bin");

    // Clean up: free all memory used by the model and library
    neu_nn_destroy(nn);
    neu_free();

    return 0;
}
```
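
The feature list mentions binary save and load, but the example above only saves. Here is a minimal sketch of reloading a model, assuming the library pairs `neu_nn_save` with a load counterpart along these lines (the exact name and signature are assumptions; check `neu.h`):

```c
#include "neu.h"

int main(void) {
    neu_init();

    // ASSUMED API: a load counterpart to neu_nn_save(nn, "my_model.bin");
    // verify the actual name and signature in neu.h before using
    neu_NeuralNetwork* nn = neu_nn_load("my_model.bin");

    // ... run neu_nn_predict as in the quick start above ...

    neu_nn_destroy(nn);
    neu_free();
    return 0;
}
```
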
- Clone the repo

```sh
git clone https://github.com/nishantHolla/neurite.git
cd neurite
```

- Make the library (uses gcc by default)
```sh
make lib
```
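
To plug in a custom activation, implement the forward function and its derivative with the signatures below, wrap them in a `neu_Activation` struct, and attach it to a layer:
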
```c
// Define your activation function with the following signature
void my_act_func(neu_Vector* p_dest, const neu_Vector* p_src, const neu_NeuralNetwork* p_nn) {
    // ...
}
// Define the derivative of your activation function with the following signature
void d_my_act_func(neu_Vector* p_dest, const neu_Vector* p_src, const neu_NeuralNetwork* p_nn) {
    // ...
}
// Define the neu_Activation struct with your functions and an init function
const neu_Activation MY_ACTIVATION_FUNCTION = {
    .name = "my_act_func",
    .act = my_act_func,
    .d_act = d_my_act_func,
    .init_fn = neu_xavier_normal // weight initializer for layers using this activation
};
// Set the activation function of the layer in the neural network
neu_nn_set_act(1, nn, MY_ACTIVATION_FUNCTION);
```
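
As a concrete sketch, a leaky ReLU could look like the following. Only `neu_vec_get` and `neu_vec_set_ar` appear elsewhere in this README; `neu_vec_size` and `neu_vec_set` are assumed accessors, so substitute whatever `neu.h` actually provides:

```c
// Leaky ReLU forward pass: identity for x > 0, small slope otherwise.
// ASSUMED: neu_vec_size and neu_vec_set exist; check neu.h for the real names.
void leaky_relu(neu_Vector* p_dest, const neu_Vector* p_src, const neu_NeuralNetwork* p_nn) {
    (void)p_nn; // network state is not needed for an element-wise activation
    for (size_t i = 0; i < neu_vec_size(p_src); ++i) {
        const double x = neu_vec_get(i, p_src);
        neu_vec_set(i, p_dest, x > 0.0 ? x : 0.01 * x);
    }
}

// Element-wise derivative of the leaky ReLU
void d_leaky_relu(neu_Vector* p_dest, const neu_Vector* p_src, const neu_NeuralNetwork* p_nn) {
    (void)p_nn;
    for (size_t i = 0; i < neu_vec_size(p_src); ++i) {
        neu_vec_set(i, p_dest, neu_vec_get(i, p_src) > 0.0 ? 1.0 : 0.01);
    }
}

const neu_Activation LEAKY_RELU_ACT = {
    .name = "leaky_relu",
    .act = leaky_relu,
    .d_act = d_leaky_relu,
    .init_fn = neu_xavier_normal
};

// Attach it to the hidden layer, as in the template above
neu_nn_set_act(1, nn, LEAKY_RELU_ACT);
```
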
```c
// Define your loss function with the following signature
double my_loss_func(const neu_NeuralNetwork* p_nn) {
    // ...
}
// Define your loss gradient function with the following signature
void my_loss_func_grad(neu_Vector* p_dest, const neu_NeuralNetwork* p_nn) {
    // ...
}
// Define the neu_Loss struct with your functions
const neu_Loss MY_LOSS_FUNCTION = {
    .name = "my_loss_func",
    .loss = my_loss_func,
    .loss_grad = my_loss_func_grad
};
// Set the loss function of the neural network
neu_nn_set_loss(nn, MY_LOSS_FUNCTION);
```
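
Custom optimizers follow the same pattern. The callback receives the network and a layer index, which suggests the library invokes it once per layer to apply that layer's parameter update:
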
```c
// Define your optimizer with the following signature
void my_optimizer_func(neu_NeuralNetwork* p_nn, const size_t p_layer) {
    // ...
}
// Define the neu_Optimizer struct with your function
const neu_Optimizer MY_OPTIMIZER = {
    .name = "my_optimizer",
    .opt = my_optimizer_func
};
// Set the optimizer for the neural network
neu_nn_set_opt(nn, MY_OPTIMIZER);
```
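
In the quick-start example above, the optimizer is registered before `neu_nn_init`, so a custom optimizer should presumably be set at the same point in the setup sequence.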