diff --git a/compass/scripts/process.py b/compass/scripts/process.py index 6e7938f3..a71a1ecb 100644 --- a/compass/scripts/process.py +++ b/compass/scripts/process.py @@ -181,6 +181,13 @@ async def process_jurisdictions_with_openai( # noqa: PLR0917, PLR0913 } ] + .. IMPORTANT:: + You will need to ensure that the model name used here + matches your deployment if you are using Azure OpenAI. For + example, if you deployed the GPT-4o-mini model under the + name ``"gpt-4o-mini-2025-04-11"``, you would want to set + ``"model": "gpt-4o-mini-2025-04-11"``. + By default, ``"gpt-4o"``. num_urls_to_check_per_jurisdiction : int, optional Number of unique Google search result URLs to check for each diff --git a/examples/execution_basics/README.rst b/examples/execution_basics/README.rst index 8b7fd355..cae42ee3 100644 --- a/examples/execution_basics/README.rst +++ b/examples/execution_basics/README.rst @@ -64,6 +64,13 @@ To override this default, add a ``model`` key to your config: "model": "gpt-4o-mini" +.. IMPORTANT:: + You will need to update the model name to match your deployment if you are using Azure OpenAI. + For example, if you deployed the GPT-4o-mini model under the name ``"gpt-4o-mini-2025-04-11"``, + you would set ``"model": "gpt-4o-mini-2025-04-11"`` in the COMPASS config (along with the + deployment endpoint, version, and API key). + + Typical Config -------------- In most cases, you'll want more control over the execution parameters, especially those related to the LLM configuration. diff --git a/examples/execution_basics/config_kitchen_sink.json5 b/examples/execution_basics/config_kitchen_sink.json5 index 046ab93a..015b6ce2 100644 --- a/examples/execution_basics/config_kitchen_sink.json5 +++ b/examples/execution_basics/config_kitchen_sink.json5 @@ -5,6 +5,7 @@ "jurisdiction_fp": "jurisdictions.csv", "model": [ { + // make sure the model name matches your Azure OpenAI deployment name! 
"name": "deployment-gpt-4o-mini", "llm_call_kwargs":{ "temperature": 0, @@ -16,8 +17,8 @@ "text_splitter_chunk_overlap": 500, "client_type": "azure", // this is the default "client_kwargs": { - "azure_api_key": "", - "azure_version": "", + "api_key": "", + "api_version": "", "azure_endpoint": "", }, // "default" has to appear as a task exactly once across @@ -25,6 +26,7 @@ "tasks": "default", }, { + // make sure the model name matches your Azure OpenAI deployment name! "name": "deployment-gpt-4o", "llm_call_kwargs":{ "temperature": 0, @@ -36,8 +38,8 @@ "text_splitter_chunk_overlap": 1000, "client_type": "azure", // this is the default "client_kwargs": { - "azure_api_key": "", - "azure_version": "", + "api_key": "", + "api_version": "", "azure_endpoint": "", }, "tasks": [ @@ -48,6 +50,8 @@ ] }, { + // client type is "openai" below (not "azure"), + // so we use standard model names (not deployment-specific) "name": "gpt-4o-mini", "llm_call_kwargs":{ "temperature": 0, diff --git a/examples/execution_basics/config_recommended.json5 b/examples/execution_basics/config_recommended.json5 index 293ef90a..61798fbb 100644 --- a/examples/execution_basics/config_recommended.json5 +++ b/examples/execution_basics/config_recommended.json5 @@ -5,7 +5,8 @@ "jurisdiction_fp": "jurisdictions.csv", "model": [ { - "name": "gpt-4o-mini", + // make sure the model name matches your Azure OpenAI deployment name! + "name": "deployment-gpt-4o-mini", "llm_call_kwargs":{ "temperature": 0, "timeout": 300 @@ -15,8 +16,8 @@ "text_splitter_chunk_overlap": 500, "client_kwargs": { // default client is Azure OpenAI - "azure_api_key": "", - "azure_version": "", + "api_key": "", + "api_version": "", "azure_endpoint": "", }, },