3 changes: 2 additions & 1 deletion examples/README.md
@@ -85,7 +85,8 @@ poetry run mlflow models build-docker --model-uri models:/Diabetes_Prediction_Mo


## Walmart Sales
-This example uses a public dataset on Walmart sales over a specific period. The primary goal of this use case is to demonstrate how to deploy multiple models using a single serving endpoint. Additionally, it highlights some limitations of MLflow's ability to automatically generate the appropriate Dockerfile for model deployment.
+
+This example utilizes a public dataset containing Walmart sales data over a defined period. The main objective is to showcase how to deploy multiple models through a single serving endpoint. In this scenario, a separate model is trained for each store, and all store-specific models are saved as artifacts when registering the final model. This approach enables flexible and scalable deployment for store-level predictions.

* package: `examples/walmart_sales_regression`

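For readers following the example, here is a minimal sketch of the per-store training and registration flow described above. It assumes the `WalmartSalesRegressor` wrapper from `base.py`; the toy data, feature columns, import path, and artifact paths are illustrative, not taken from this PR:

```python
import mlflow
import pandas as pd
from sklearn.linear_model import LinearRegression

from examples.walmart_sales_regression.base import WalmartSalesRegressor  # import path assumed

# Toy data standing in for the Walmart dataset (columns are illustrative).
train_df = pd.DataFrame({
    "Store": [1, 1, 2, 2],
    "Temperature": [40.0, 55.0, 60.0, 70.0],
    "Weekly_Sales": [20000.0, 25000.0, 30000.0, 28000.0],
})

artifact_uris = {}
with mlflow.start_run(run_name="walmart-sales-regressors") as run:
    # Train and log one sklearn model per store.
    for store_id, store_df in train_df.groupby("Store"):
        x = store_df.drop(columns=["Weekly_Sales", "Store"])
        y = store_df["Weekly_Sales"]
        mlflow.sklearn.log_model(
            LinearRegression().fit(x, y), artifact_path=f"store_{store_id}"
        )
        artifact_uris[str(store_id)] = f"runs:/{run.info.run_id}/store_{store_id}"

    # A single pyfunc wrapper carries every store model as an artifact,
    # so one registered model / one endpoint serves all stores.
    mlflow.pyfunc.log_model(
        artifact_path="store-sales-regressor",
        python_model=WalmartSalesRegressor(),
        artifacts=artifact_uris,
        registered_model_name="walmart-store-sales-regressor-code",
    )
```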
22 changes: 19 additions & 3 deletions examples/walmart_sales_regression/base.py
@@ -12,6 +12,8 @@
from typing import List
from typing import Optional
from mlflow.models import infer_signature
+from pathlib import Path
+import platform


class WalmartSalesRegressor(mlflow.pyfunc.PythonModel):
@@ -35,13 +37,27 @@ def load_context(self, context):
        :param context: The context object containing the model.
        :return: None
        """
+        if platform.system() == "Linux":
+            # Convert Windows-style paths to POSIX paths for Linux compatibility
+            print(
+                "Converting Windows-style paths to POSIX paths for Linux compatibility..."
+            )
+            context_artifacts = {
+                key: value.replace("\\", "/")
+                for key, value in context.artifacts.items()
+            }
+
+        else:
+            # Use the context artifacts as is for non-Linux systems
+            print("Using context artifacts as is for non-Linux systems...")
+            context_artifacts = context.artifacts
+
-        model_artifacts = context.artifacts
        print("Loading model artifacts from context...")
        self.models = {
            store_id: mlflow.sklearn.load_model(uri)
-            for store_id, uri in model_artifacts.items()
+            for store_id, uri in context_artifacts.items()
        }
-        print(f"Model artifact URIs loaded: {self.artifact_uris}")
+        print(f"Model artifact URIs loaded: {context_artifacts}")

    def fit_model(self, x_train, y_train, store_id: int, run_id: str):
        """
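As an aside, the backslash-to-slash normalization performed in `load_context` above can also be expressed with `pathlib`. A self-contained sketch with illustrative artifact paths (not the repo's actual values):

```python
from pathlib import PureWindowsPath

# Illustrative artifact mapping as it might look when the models were logged on Windows.
artifacts = {"1": r"artifacts\store_1\model", "2": r"artifacts\store_2\model"}

# Equivalent to value.replace("\\", "/") used in the diff above.
posix_artifacts = {key: PureWindowsPath(value).as_posix() for key, value in artifacts.items()}
print(posix_artifacts)  # {'1': 'artifacts/store_1/model', '2': 'artifacts/store_2/model'}
```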
2 changes: 1 addition & 1 deletion examples/walmart_sales_regression/configs.yaml
@@ -9,6 +9,6 @@ categorical_features:

target: "Weekly_Sales"
# Model configuration
-registered_model_name: "walmart-store-sales-regressor"
+registered_model_name: "walmart-store-sales-regressor-code"
run_name: "walmart-sales-regressors"
artifact_path: "store-sales-regressor"
8 changes: 4 additions & 4 deletions examples/walmart_sales_regression/inference.py
@@ -28,14 +28,14 @@ def main():
print("Model loaded.")

# # predicting for the store store_id
store_id = "2"
x_test = x_test[x_test["Store"] == int(store_id)]
y_test = y_test[y_test["Store"] == int(store_id)]
store_id = 2
x_test = x_test[x_test["Store"] == store_id]
y_test = y_test[y_test["Store"] == store_id]
x_test = x_test.drop(columns=["Store"])
y_test = y_test.drop(columns=["Store"])

# make predictions
predictions = model.predict(x_test, params={"store_id": store_id})
predictions = model.predict(x_test, params={"store_id": str(store_id)})

weekly_sales = y_test[configs["target"]].values
print(
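For context on the `str(store_id)` change, a hedged sketch of calling the loaded pyfunc model; the model alias and feature columns are assumptions based on names used elsewhere in this PR:

```python
import mlflow
import pandas as pd

# Illustrative feature row; the real columns come from configs.yaml.
x_test = pd.DataFrame({"Temperature": [42.3], "Fuel_Price": [2.57]})

# Alias and name as configured in this PR (assumed to exist in the registry).
model = mlflow.pyfunc.load_model(
    "models:/walmart-store-sales-regressor-code@production"
)

# The store id filters the DataFrame as an int but is passed to predict() as a
# string, presumably because the params schema in the model signature expects str.
predictions = model.predict(x_test, params={"store_id": "2"})
print(predictions)
```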
2 changes: 1 addition & 1 deletion examples/walmart_sales_regression/online_inference.py
@@ -10,7 +10,7 @@ def main():
    Perform online inference using a REST API.

    To deploy the model using the local server, run the following command:
-    `poetry run mlflow models serve -m models:/walmart-store-sales-regressor@production -p 5000 --env-manager local`
+    `poetry run mlflow models serve -m models:/walmart-store-sales-regressor-code@production -p 5000 --env-manager local`
    """

    url = "http://localhost:5000/invocations"
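A sketch of the kind of request `online_inference.py` presumably posts to the server started with the command above; the feature columns are illustrative, while `dataframe_split` and `params` follow MLflow's standard scoring-payload format:

```python
import requests

payload = {
    # Input rows in MLflow's dataframe_split scoring format (columns illustrative).
    "dataframe_split": {
        "columns": ["Temperature", "Fuel_Price", "CPI", "Unemployment"],
        "data": [[42.3, 2.57, 211.1, 8.1]],
    },
    # Route the request to the per-store model; passed as a string, matching inference.py.
    "params": {"store_id": "2"},
}

response = requests.post(
    "http://localhost:5000/invocations",
    json=payload,
    headers={"Content-Type": "application/json"},
)
print(response.json())
```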
18 changes: 0 additions & 18 deletions examples/walmart_sales_regression/train.py
@@ -60,26 +60,8 @@ def main(**kwargs):
        artifacts=store_sales_regressor.artifact_uris,
    )
-
-    # log model with code
-    mlflow.pyfunc.log_model(
-        artifact_path=configs["artifact_path"] + "-code",
-        python_model=store_sales_regressor,
-        infer_code_paths=True,
-        registered_model_name=registered_model_name + "-code",
-        input_example=x_test.sample(5),
-        signature=signature,
-        artifacts=store_sales_regressor.artifact_uris,
-    )
-
    print("Models fitted successfully.")
-
    set_alias_to_latest_version(
        registered_model_name=registered_model_name,
        alias="production",
        client=kwargs["mlflow_client"],
    )
-    set_alias_to_latest_version(
-        registered_model_name=registered_model_name + "-code",
-        alias="production",
-        client=kwargs["mlflow_client"],
-    )

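`set_alias_to_latest_version` is a repo helper whose body is not part of this diff; a minimal sketch of what such a helper typically does with the MLflow client (an assumption, not the repo's actual implementation):

```python
from mlflow import MlflowClient

def set_alias_to_latest_version(registered_model_name: str, alias: str, client: MlflowClient) -> None:
    # Pick the newest version of the registered model and point the alias at it.
    versions = client.search_model_versions(f"name='{registered_model_name}'")
    latest = max(versions, key=lambda mv: int(mv.version))
    client.set_registered_model_alias(
        name=registered_model_name, alias=alias, version=latest.version
    )
```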

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "mlflow_for_ml_dev"
version = "1.5.4"
version = "1.6.0"
description = "Code examples for the youtube playlist 'MLflow for Machine Learning Development' by Manuel Gil"
authors = ["Manuel Gil <[email protected]>"]
readme = "README.md"