add tests for synthese + separate classes in different files + separate tests per module + kwargs can now use CLater
jacquesfize committed Mar 4, 2024
1 parent 6190619 commit 79fa135
Showing 7 changed files with 284 additions and 203 deletions.
3 changes: 3 additions & 0 deletions backend/geonature/tests/benchmarks/__init__.py
@@ -0,0 +1,3 @@
# Import required so that CLater expressions using url_for can be resolved by eval()
from flask import url_for
from .benchmark_data import *
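
The `url_for` import is not used in this package directly: it only needs to be present in the namespace that later calls `eval()` on `CLater` strings (the test functions generated in `benchmark_generator.py` below, which star-import this package). A minimal, self-contained sketch of the mechanism, with `sqrt` standing in for `url_for`:

```python
# Sketch of the CLater/eval() mechanism: the deferred expression is stored as a
# plain string and only evaluated once the names it references are importable
# in the evaluating module.  `sqrt` stands in for `url_for` here.
from math import sqrt


class CLater:
    def __init__(self, value) -> None:
        self.value = value


deferred = CLater("sqrt(16)")
# ...later, typically inside the generated test function:
result = eval(deferred.value)  # -> 4.0
```
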
backend/geonature/tests/benchmarks/benchmark_data.py
@@ -1,3 +1,8 @@
from geonature.utils.env import db
import pytest
from ref_geo.models import BibAreasTypes, LAreas
from sqlalchemy import select

benchmark_synthese_intersection_data_test_bbox = {
"modif_since_validation": False,
"geoIntersection": {
@@ -88,10 +93,47 @@
},
}

benchmark_synthese_intersection_data_test_commune = {
"modif_since_validation": False,
"area_COM": [28612],
}

def benchmark_synthese_intersection_data_test_commune():
return {
"modif_since_validation": False,
"area_COM": [
db.session.scalars(
select(LAreas).join(BibAreasTypes).where(BibAreasTypes.type_code == "COM").limit(1)
)
.one()
.id_area
],
}


def benchmark_synthese_intersection_data_test_departement():
return {
"modif_since_validation": False,
"area_DEP": [
db.session.scalars(
select(LAreas.id_area)
.join(BibAreasTypes)
.where(BibAreasTypes.type_code == "DEP")
.limit(1)
).first()
],
}


def benchmark_synthese_intersection_data_test_region():
return {
"modif_since_validation": False,
"area_REG": [
db.session.scalars(
select(LAreas.id_area)
.join(BibAreasTypes)
.where(BibAreasTypes.type_code == "REG")
.limit(1)
).first()
],
}


benchmark_synthese_with_tree_taxon = {
"modif_since_validation": False,
99 changes: 99 additions & 0 deletions backend/geonature/tests/benchmarks/benchmark_generator.py
@@ -0,0 +1,99 @@
from typing import Any
from geonature.tests.utils import set_logged_user
from geonature.tests.fixtures import users

import importlib
from geonature.tests.benchmarks import *


class CLater:
def __init__(self, value) -> None:
self.value = value


class BenchmarkTest:
"""
Class that allows to define a benchmark test and generate the pytest function to run the benchmark.
Example, in a pytest file:
```python
import pytest
bench = BenchmarkTest(print,"test_print",["Hello","World"],{})
@pytest.mark.usefixtures("client_class", "temporary_transaction")
class TestBenchie:
pass
TestBenchie.test_print = bench.generate_func_test()
```
If a function or its argument depend on the pytest function context, use the GetLatter class : GetLatter("<python expression">). For example, to use
the `url_for()` function, replace from `url_for(...)` to `GetLatter("url_for(...)")`.
If the benchmark requires a user to be logged, use the `function_kwargs` with the "user_profile" key and the value corresponds to a key
available in the dictionary returned by the `user` fixture.
"""

def __init__(self, function, function_args=[], function_kwargs={}) -> None:
"""
Constructor of BenchmarkTest
Parameters
----------
function : Callable | GetLatter
function that will be benchmark
name_benchmark : str
name of the benchmark
function_args : Sequence[Any | GetLatter]
args for the function
function_kwargs : Dict[str,Any]
kwargs for the function
"""
self.function = function
self.function_args = function_args
self.function_kwargs = function_kwargs

def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.generate_func_test()

def generate_func_test(self):
"""
Return the pytest function to run the benchmark on the indicated function.
Returns
-------
Callable
test function
Raises
------
KeyError
if the given user_profile does not exist in the `users` fixture
"""

fixtures = self.function_kwargs.pop("fixtures", [])
user_profile = self.function_kwargs.pop("user_profile", None)
imports = self.function_kwargs.pop("imports", [])

func, args, kwargs = self.function, self.function_args, self.function_kwargs

def function_to_include_fixture(*fixture):

def final_test_function(self, benchmark, users):

if user_profile:
if not user_profile in users:
raise KeyError(f"{user_profile} can't be found in the users fixture!")
set_logged_user(self.client, users[user_profile])
benchmark(
eval(func.value) if isinstance(func, CLater) else func,
*[eval(arg.value) if isinstance(arg, CLater) else arg for arg in args],
**{
key: eval(value.value) if isinstance(value, CLater) else value
for key, value in kwargs.items()
},
)

return final_test_function

return function_to_include_fixture(*fixtures)
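
A hypothetical sketch of how this generator is used, showing the new ability for `function_kwargs` values to be `CLater` expressions (the benchmarked callable, its argument, and the class name below are illustrative; it assumes GeoNature's standard test fixtures `client_class`, `temporary_transaction`, `users` and pytest-benchmark's `benchmark` fixture are registered, as for the test modules that follow):

```python
# Illustrative sketch only: benchmark a plain Python callable through
# BenchmarkTest, with the positional argument and the keyword argument both
# supplied as CLater strings, evaluated only when the generated test runs.
import pytest

from .benchmark_generator import BenchmarkTest, CLater


@pytest.mark.usefixtures("client_class", "temporary_transaction")
class TestBenchmarkSketch:
    test_sorted_desc = BenchmarkTest(
        sorted,
        [CLater("list(range(1_000, 0, -1))")],   # args could already be CLater…
        dict(reverse=CLater("True")),            # …and, with this commit, kwargs too
    )()  # calling the instance returns the pytest function (see __call__)
```

Accepting `CLater` in kwargs mirrors the existing handling of positional arguments; it is what lets the synthese tests below build their `json` payloads from the database at run time instead of hardcoding area ids.
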
22 changes: 22 additions & 0 deletions backend/geonature/tests/benchmarks/test_benchmark_occhab.py
@@ -0,0 +1,22 @@
import logging
import pytest
from geonature.tests.benchmarks import *
from geonature.tests.test_pr_occhab import stations

from .benchmark_generator import BenchmarkTest, CLater

logging.basicConfig()
logger = logging.getLogger("logger-name")
logger.setLevel(logging.DEBUG)

from .utils import CLIENT_GET, CLIENT_POST


@pytest.mark.usefixtures("client_class", "temporary_transaction")
class TestBenchmarkOcchab:

test_get_station = BenchmarkTest(
CLIENT_GET,
[CLater("""url_for("occhab.get_station", id_station=8)""")],
dict(user_profile="user", fixtures=[stations]),
)()
70 changes: 70 additions & 0 deletions backend/geonature/tests/benchmarks/test_benchmark_synthese.py
@@ -0,0 +1,70 @@
import logging
import pytest
from geonature.tests.benchmarks import *
from geonature.tests.test_pr_occhab import stations

from .benchmark_generator import BenchmarkTest, CLater

logging.basicConfig()
logger = logging.getLogger("logger-name")
logger.setLevel(logging.DEBUG)

from .utils import CLIENT_GET, CLIENT_POST


@pytest.mark.usefixtures("client_class", "temporary_transaction") # , "activate_profiling_sql")
class TestBenchmarkSynthese:
test_get_default_nomenclatures = BenchmarkTest(
CLIENT_GET,
[CLater("""url_for("gn_synthese.getDefaultsNomenclatures")""")],
dict(user_profile="self_user"),
)()
test_synthese_with_geometry_bbox = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=benchmark_synthese_intersection_data_test_bbox,
),
)()
test_synthese_with_geometry_complex_poly = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=benchmark_synthese_intersection_data_test_complex_polygon,
),
)()
test_synthese_with_commune = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=CLater("benchmark_data.benchmark_synthese_intersection_data_test_commune()"),
),
)()

test_synthese_with_departement = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=CLater("benchmark_data.benchmark_synthese_intersection_data_test_departement()"),
),
)()
test_synthese_with_region = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=CLater("benchmark_data.benchmark_synthese_intersection_data_test_region()"),
),
)()
test_synthese_with_up_tree_taxon = BenchmarkTest(
CLIENT_POST,
[CLater("""url_for("gn_synthese.get_observations_for_web")""")],
dict(
user_profile="admin_user",
json=benchmark_synthese_with_tree_taxon,
),
)()
44 changes: 44 additions & 0 deletions backend/geonature/tests/benchmarks/utils.py
@@ -0,0 +1,44 @@
import time
import logging

import pytest
import pandas
from sqlalchemy import event

from geonature.utils.env import db
from .benchmark_generator import CLater

logging.basicConfig()
logger = logging.getLogger("logger-name")
logger.setLevel(logging.DEBUG)


@pytest.fixture(scope="class")
def activate_profiling_sql():
"""
Fixture that activates profiling of SQL queries, storing each query's statement and execution time in a CSV file.
"""

results_file = "sql_queries.csv"
df = pandas.DataFrame([], columns=["Query", "Total Time [s.]"])
df.to_csv(results_file, mode="a", header=True, index=None, sep=";")

# @event.listens_for(Engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
conn.info.setdefault("query_start_time", []).append(time.time())
logger.debug("Start Query: %s" % statement)

# @event.listens_for(Engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
total = time.time() - conn.info["query_start_time"].pop(-1)
logger.debug("Query Complete!")
logger.debug("Total Time: %f" % total)
if statement.startswith("SELECT"):
df = pandas.DataFrame([[statement, total]], columns=["Query", "Total Time"])
df.to_csv(results_file, mode="a", header=False, index=None, sep=";")

event.listen(db.engine, "before_cursor_execute", before_cursor_execute)
event.listen(db.engine, "after_cursor_execute", after_cursor_execute)


CLIENT_GET, CLIENT_POST = CLater("self.client.get"), CLater("self.client.post")
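
The commented-out entry in the `usefixtures` marker of `test_benchmark_synthese.py` hints at how a benchmark class would opt into this profiling; a minimal sketch (class name illustrative):

```python
# Sketch: enabling SQL profiling for a benchmark class.  While its tests run,
# every SELECT statement is appended to sql_queries.csv with its execution time.
import pytest


@pytest.mark.usefixtures("client_class", "temporary_transaction", "activate_profiling_sql")
class TestBenchmarkSyntheseProfiled:
    ...  # BenchmarkTest entries as in TestBenchmarkSynthese
```
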