diff --git a/README.md b/README.md index a3ac1bd..e33f35b 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ micromamba create -f environment.yml micromamba activate seldonenv ``` -### Compilation +### Compilation and Installation We use `meson` to compile and build Seldon. @@ -31,6 +31,13 @@ meson setup build meson compile -C build ``` +To install `seldon` to your `conda` environment, run the following: + +```bash +meson setup build --prefix $CONDA_PREFIX +meson install -C build +``` + ### Quick Start Run the executable, and provide the input configuration TOML file (as the first @@ -41,6 +48,12 @@ cd build ./seldon /path/to/config -o /path/to/output/dir ``` +If you've installed it, you can simply run `seldon` from anywhere. + +```bash +seldon /path/to/config -o /path/to/output/dir +``` + #### Output files The file `network.txt` contains information about the network. First column is the index of the agent, then the next column is the number of incoming agent connections *including* the agent itself. Subsequent columns are the neighbouring incoming agent indices and weights. In addition, every iteration produces a *double* opinion value for each agent. These are outputted to files named opinions_i.txt. 
diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 7e55389..e1dc5a7 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -52,7 +52,8 @@ void Seldon::ActivityAgentModel::update_network_probabilistic() { // Implement the weight for the probability of agent `idx_agent` contacting agent `j` // Not normalised since this is taken care of by the reservoir sampling - auto weight_callback = [idx_agent, this]( size_t j ) { + auto weight_callback = [idx_agent, this]( size_t j ) + { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow( @@ -111,7 +112,8 @@ void Seldon::ActivityAgentModel::update_network_mean() contact_prob_list[idx_agent] = weights; // set to zero } - auto probability_helper = []( double omega, size_t m ) { + auto probability_helper = []( double omega, size_t m ) + { double p = 0; for( size_t i = 1; i <= m; i++ ) p += ( std::pow( -omega, i + 1 ) + omega ) / ( omega + 1 ); @@ -122,7 +124,8 @@ void Seldon::ActivityAgentModel::update_network_mean() { // Implement the weight for the probability of agent `idx_agent` contacting agent `j` // Not normalised since this is taken care of by the reservoir sampling - auto weight_callback = [idx_agent, this]( size_t j ) { + auto weight_callback = [idx_agent, this]( size_t j ) + { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow(