From c714f224f93612374f5fb2271eb6c9429647f1c4 Mon Sep 17 00:00:00 2001
From: Mahesh Sathiamoorthy
Date: Thu, 12 Dec 2024 18:11:18 +0000
Subject: [PATCH 01/34] Remove code accidentally added, and add a type
shortcut.
---
src/bespokelabs/curator/llm/llm.py | 23 ++++-------------------
1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index e5eadaa5..69dc7b6e 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -29,6 +29,7 @@
_CURATOR_DEFAULT_CACHE_DIR = "~/.cache/curator"
T = TypeVar("T")
+_DictOrBaseModel = Union[Dict[str, Any], BaseModel]
logger = logger = logging.getLogger(__name__)
@@ -70,28 +71,12 @@ def _determine_backend(
)
return "litellm"
- @staticmethod
- def _convert_response_to_dict(response):
- if hasattr(response, "model_dump"):
- return response.model_dump()
- elif isinstance(response, dict):
- return response
- elif hasattr(response, "__dict__"):
- return response.__dict__
- return response
-
def __init__(
self,
model_name: str,
- prompt_func: Callable[[Union[Dict[str, Any], BaseModel]], Dict[str, str]],
+ prompt_func: Callable[[_DictOrBaseModel], _DictOrBaseModel],
parse_func: Optional[
- Callable[
- [
- Union[Dict[str, Any], BaseModel],
- Union[Dict[str, Any], BaseModel],
- ],
- T,
- ]
+ Callable[[_DictOrBaseModel, _DictOrBaseModel], _DictOrBaseModel]
] = None,
response_format: Optional[Type[BaseModel]] = None,
backend: Optional[str] = None,
@@ -101,7 +86,7 @@ def __init__(
batch_size: Optional[int] = None,
batch_check_interval: Optional[int] = 60,
delete_successful_batch_files: bool = True,
- delete_failed_batch_files: bool = False, # To allow users to debug failed batches
+ delete_failed_batch_files: bool = False,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
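For reference, a minimal sketch of how user-supplied functions line up with the new `_DictOrBaseModel` alias; the field names and function bodies are illustrative, not taken from the repository:
```python
from typing import Any, Dict, Union
from pydantic import BaseModel

_DictOrBaseModel = Union[Dict[str, Any], BaseModel]

def prompt_func(row: _DictOrBaseModel) -> _DictOrBaseModel:
    # Build a single-turn chat prompt from a dataset row (illustrative field name).
    return {"role": "user", "content": row["question"]}

def parse_func(row: _DictOrBaseModel, response: _DictOrBaseModel) -> _DictOrBaseModel:
    # Merge the input row with the raw model response (illustrative).
    return {"question": row["question"], "answer": response}
```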
From 110aa149325141afb16b9a1128faf777d060bef4 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 13 Dec 2024 06:24:15 +0000
Subject: [PATCH 02/34] Refactor LLM class to use context manager for batch
parameters
- Add BatchConfig dataclass for batch configuration
- Implement context manager for batch processing
- Update batch processing initialization to maintain state
- Update simple_batch.py to use new context manager API
- Maintain backward compatibility with existing batch parameters
Co-Authored-By: mahesh@bespokelabs.ai
---
src/bespokelabs/curator/llm/llm.py | 251 ++++++++++++++++++++++-------
tests/batch/simple_batch.py | 11 +-
2 files changed, 196 insertions(+), 66 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index e5eadaa5..7fd16ec2 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -3,6 +3,7 @@
import inspect
import logging
import os
+from dataclasses import dataclass
from datetime import datetime
from io import BytesIO
from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar, Union
@@ -33,6 +34,25 @@
logger = logger = logging.getLogger(__name__)
+@dataclass
+class BatchConfig:
+ """Configuration for batch processing in LLM.
+
+ This class holds all configuration parameters related to batch processing,
+ used by the LLM context manager for batch operations.
+
+ Args:
+ batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
+ batch_check_interval: How often to check batch status, in seconds.
+ delete_successful_batch_files: Whether to delete batch files after successful processing.
+ delete_failed_batch_files: Whether to delete batch files after failed processing.
+ """
+ batch_size: Optional[int] = None
+ batch_check_interval: int = 60
+ delete_successful_batch_files: bool = True
+ delete_failed_batch_files: bool = False
+
+
class LLM:
"""Interface for prompting LLMs."""
@@ -97,15 +117,16 @@ def __init__(
backend: Optional[str] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
- batch: bool = False,
- batch_size: Optional[int] = None,
- batch_check_interval: Optional[int] = 60,
- delete_successful_batch_files: bool = True,
- delete_failed_batch_files: bool = False, # To allow users to debug failed batches
temperature: Optional[float] = None,
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
+ # Deprecated parameters
+ batch: bool = False,
+ batch_size: Optional[int] = None,
+ batch_check_interval: Optional[int] = 60,
+ delete_successful_batch_files: bool = True,
+ delete_failed_batch_files: bool = False,
):
"""Initialize a LLM.
@@ -118,82 +139,190 @@ def __init__(
response_format: A Pydantic model specifying the
response format from the LLM.
backend: The backend to use ("openai" or "litellm"). If None, will be auto-determined
- batch: Whether to use batch processing
- batch_size: The size of the batch to use, only used if batch is True
- batch_check_interval: The interval to check for batch completions, only used if batch is True
- delete_successful_batch_files: Whether to delete successful batch files, only used if batch is True
- delete_failed_batch_files: Whether to delete failed batch files, only used if batch is True
- temperature: The temperature to use for the LLM, only used if batch is False
- top_p: The top_p to use for the LLM, only used if batch is False
- presence_penalty: The presence_penalty to use for the LLM, only used if batch is False
- frequency_penalty: The frequency_penalty to use for the LLM, only used if batch is False
+ max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
+ max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
+ temperature: The temperature to use for the LLM
+ top_p: The top_p to use for the LLM
+ presence_penalty: The presence_penalty to use for the LLM
+ frequency_penalty: The frequency_penalty to use for the LLM
+
+ Deprecated Args:
+ batch: Whether to use batch processing (deprecated, use context manager instead)
+ batch_size: The size of the batch to use (deprecated, use context manager instead)
+ batch_check_interval: The interval to check for batch completions (deprecated)
+ delete_successful_batch_files: Whether to delete successful batch files (deprecated)
+ delete_failed_batch_files: Whether to delete failed batch files (deprecated)
"""
+ import warnings
+
+ if batch or batch_size is not None:
+ warnings.warn(
+ "The 'batch' and batch-related parameters are deprecated. "
+ "Please use the context manager instead:\n"
+ "with llm.batch(batch_size=N, batch_check_interval=X): ...",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
self.prompt_formatter = PromptFormatter(
model_name, prompt_func, parse_func, response_format
)
- self.batch_mode = batch
+
+ # Initialize context manager state
+ self._batch_config = None
+ self._original_request_processor = None
+
+ # Store model parameters
+ self.temperature = temperature
+ self.top_p = top_p
+ self.presence_penalty = presence_penalty
+ self.frequency_penalty = frequency_penalty
+ self.model_name = model_name
# Auto-determine backend if not specified
- # Use provided backend or auto-determine based on model and format
if backend is not None:
self.backend = backend
else:
self.backend = self._determine_backend(model_name, response_format)
- # Select request processor based on backend
- if self.backend == "openai":
- if batch:
- if batch_size is None:
- batch_size = 1_000
- logger.info(
- f"batch=True but no batch_size provided, using default batch_size of {batch_size:,}"
- )
- if max_requests_per_minute is not None or max_tokens_per_minute is not None:
- logger.warning(
- "max_requests_per_minute and max_tokens_per_minute not supported with batch mode"
- )
- self._request_processor = OpenAIBatchRequestProcessor(
- model=model_name,
- batch_size=batch_size,
- temperature=temperature,
- top_p=top_p,
- batch_check_interval=batch_check_interval,
- presence_penalty=presence_penalty,
- frequency_penalty=frequency_penalty,
- delete_successful_batch_files=delete_successful_batch_files,
- delete_failed_batch_files=delete_failed_batch_files,
- )
- else:
- if batch_size is not None:
- logger.warning(
- f"LLM argument `batch_size` {batch_size} is ignored because `batch` is False"
- )
- self._request_processor = OpenAIOnlineRequestProcessor(
- model=model_name,
- temperature=temperature,
- top_p=top_p,
- presence_penalty=presence_penalty,
- frequency_penalty=frequency_penalty,
- max_requests_per_minute=max_requests_per_minute,
- max_tokens_per_minute=max_tokens_per_minute,
+ # If using deprecated batch parameters, set up batch config
+ if batch or batch_size is not None:
+ self._batch_config = BatchConfig(
+ batch_size=batch_size,
+ batch_check_interval=batch_check_interval,
+ delete_successful_batch_files=delete_successful_batch_files,
+ delete_failed_batch_files=delete_failed_batch_files,
+ )
+
+ # Initialize request processor
+ self._setup_request_processor(
+ max_requests_per_minute=max_requests_per_minute,
+ max_tokens_per_minute=max_tokens_per_minute,
+ )
+
+ def _setup_request_processor(
+ self,
+ max_requests_per_minute: Optional[int] = None,
+ max_tokens_per_minute: Optional[int] = None,
+ ):
+ """Set up the appropriate request processor based on current config.
+
+ This method initializes the request processor based on the current configuration,
+ including batch mode settings if a batch context is active. It handles both
+ OpenAI and LiteLLM backends, with appropriate warnings for unsupported configurations.
+
+ Args:
+ max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
+ max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
+ """
+ # Store current processor before potentially switching to batch mode
+ if hasattr(self, '_request_processor'):
+ self._original_request_processor = self._request_processor
+
+ # Check if we're in batch mode (either via context manager or deprecated params)
+ is_batch_mode = self._batch_config is not None
+
+ # If we already have a batch processor of the same type, keep it to maintain state
+ if (is_batch_mode and
+ hasattr(self, '_request_processor') and
+ isinstance(self._request_processor, OpenAIBatchRequestProcessor)):
+ return
+
+ if is_batch_mode and self.backend == "openai":
+ if max_requests_per_minute is not None or max_tokens_per_minute is not None:
+ logger.warning(
+ "max_requests_per_minute and max_tokens_per_minute not supported with batch mode"
)
+ self._request_processor = OpenAIBatchRequestProcessor(
+ model=self.model_name,
+ batch_size=self._batch_config.batch_size or 1_000,
+ batch_check_interval=self._batch_config.batch_check_interval,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ delete_successful_batch_files=self._batch_config.delete_successful_batch_files,
+ delete_failed_batch_files=self._batch_config.delete_failed_batch_files,
+ )
+ elif self.backend == "openai":
+ self._request_processor = OpenAIOnlineRequestProcessor(
+ model=self.model_name,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ max_requests_per_minute=max_requests_per_minute,
+ max_tokens_per_minute=max_tokens_per_minute,
+ )
elif self.backend == "litellm":
- if batch:
+ if is_batch_mode:
logger.warning(
- "Batch mode is not supported with LiteLLM backend, ignoring batch=True"
+ "Batch mode is not supported with LiteLLM backend, ignoring batch context"
)
self._request_processor = LiteLLMOnlineRequestProcessor(
- model=model_name,
- temperature=temperature,
- top_p=top_p,
- presence_penalty=presence_penalty,
- frequency_penalty=frequency_penalty,
+ model=self.model_name,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
)
else:
raise ValueError(f"Unknown backend: {self.backend}")
+ def batch(
+ self,
+ batch_size: Optional[int] = None,
+ batch_check_interval: int = 60,
+ delete_successful_batch_files: bool = True,
+ delete_failed_batch_files: bool = False,
+ ) -> "LLM":
+ """Create a context manager for batch processing.
+
+ Args:
+ batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
+ batch_check_interval: How often to check batch status, in seconds.
+ delete_successful_batch_files: Whether to delete batch files after successful processing.
+ delete_failed_batch_files: Whether to delete batch files after failed processing.
+
+ Returns:
+ self: The LLM instance configured for batch processing.
+
+ Example:
+ ```python
+ llm = LLM(...)
+ with llm.batch(batch_size=100):
+ results = llm(dataset)
+ ```
+ """
+ self._batch_config = BatchConfig(
+ batch_size=batch_size,
+ batch_check_interval=batch_check_interval,
+ delete_successful_batch_files=delete_successful_batch_files,
+ delete_failed_batch_files=delete_failed_batch_files,
+ )
+ return self
+
+ def __enter__(self):
+ """Enter batch context.
+
+ Raises:
+ RuntimeError: If already in a batch context or if batch() wasn't called.
+ """
+ if self._batch_config is None:
+ raise RuntimeError("Must use 'with llm.batch()' to enter batch context")
+ if self._original_request_processor is not None:
+ raise RuntimeError("Already in batch context")
+ self._setup_request_processor()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Exit batch context and restore original request processor."""
+ self._batch_config = None
+ if self._original_request_processor is not None:
+ self._request_processor = self._original_request_processor
+
def __call__(
self,
dataset: Optional[Iterable] = None,
@@ -263,7 +392,7 @@ def _completions(
if self.prompt_formatter.response_format
else "text"
),
- str(self.batch_mode),
+ str(self._batch_config is not None),
str(self.backend),
]
)
@@ -293,7 +422,7 @@ def _completions(
else "text"
),
"run_hash": fingerprint,
- "batch_mode": self.batch_mode,
+ "batch_mode": self._batch_config is not None,
}
metadata_db.store_metadata(metadata_dict)
diff --git a/tests/batch/simple_batch.py b/tests/batch/simple_batch.py
index 251296ae..0a166658 100644
--- a/tests/batch/simple_batch.py
+++ b/tests/batch/simple_batch.py
@@ -15,14 +15,15 @@ def main(args):
prompter = LLM(
prompt_func=lambda row: row["prompt"],
- model_name="gpt-4o-mini",
+ model_name="gpt-3.5-turbo",
response_format=None,
- batch=True,
- batch_size=args.batch_size,
- batch_check_interval=args.batch_check_interval,
)
- dataset = prompter(dataset, batch_cancel=args.cancel)
+ with prompter.batch(
+ batch_size=args.batch_size,
+ batch_check_interval=args.batch_check_interval,
+ ):
+ dataset = prompter(dataset, batch_cancel=args.cancel)
print(dataset.to_pandas())
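A usage sketch of the context-manager API this patch introduces; the model name and prompt function mirror the updated test, while the dataset contents are placeholders:
```python
from datasets import Dataset
from bespokelabs.curator import LLM

llm = LLM(
    model_name="gpt-3.5-turbo",
    prompt_func=lambda row: row["prompt"],
    response_format=None,
)
dataset = Dataset.from_dict({"prompt": ["Say hello.", "Say goodbye."]})

# Inside the with-block the OpenAI batch processor is used; on exit the
# original online processor is restored by __exit__.
with llm.batch(batch_size=100, batch_check_interval=60):
    results = llm(dataset)
```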
From 843a2e63e2a7886b1ed3ea5e634d72076e1be7e4 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 13 Dec 2024 06:33:41 +0000
Subject: [PATCH 03/34] Apply black formatting to llm.py
Co-Authored-By: mahesh@bespokelabs.ai
---
src/bespokelabs/curator/llm/llm.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index 7fd16ec2..ca8e24fd 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -47,6 +47,7 @@ class BatchConfig:
delete_successful_batch_files: Whether to delete batch files after successful processing.
delete_failed_batch_files: Whether to delete batch files after failed processing.
"""
+
batch_size: Optional[int] = None
batch_check_interval: int = 60
delete_successful_batch_files: bool = True
@@ -216,16 +217,18 @@ def _setup_request_processor(
max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
"""
# Store current processor before potentially switching to batch mode
- if hasattr(self, '_request_processor'):
+ if hasattr(self, "_request_processor"):
self._original_request_processor = self._request_processor
# Check if we're in batch mode (either via context manager or deprecated params)
is_batch_mode = self._batch_config is not None
# If we already have a batch processor of the same type, keep it to maintain state
- if (is_batch_mode and
- hasattr(self, '_request_processor') and
- isinstance(self._request_processor, OpenAIBatchRequestProcessor)):
+ if (
+ is_batch_mode
+ and hasattr(self, "_request_processor")
+ and isinstance(self._request_processor, OpenAIBatchRequestProcessor)
+ ):
return
if is_batch_mode and self.backend == "openai":
From 6d801d8ae6464e76637601e7dc75b5e8d5422287 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 13 Dec 2024 06:58:20 +0000
Subject: [PATCH 04/34] Move batch processing to top-level context manager
Co-Authored-By: mahesh@bespokelabs.ai
---
src/bespokelabs/curator/llm/batch.py | 104 ++++++++++++++++++++++++
src/bespokelabs/curator/llm/llm.py | 114 ++-------------------------
tests/batch/simple_batch.py | 4 +-
3 files changed, 113 insertions(+), 109 deletions(-)
create mode 100644 src/bespokelabs/curator/llm/batch.py
diff --git a/src/bespokelabs/curator/llm/batch.py b/src/bespokelabs/curator/llm/batch.py
new file mode 100644
index 00000000..a01cc79c
--- /dev/null
+++ b/src/bespokelabs/curator/llm/batch.py
@@ -0,0 +1,104 @@
+"""Batch processing functionality for LLM."""
+
+from dataclasses import dataclass
+from typing import Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from .llm import LLM
+
+
+@dataclass
+class BatchConfig:
+ """Configuration for batch processing in LLM.
+
+ This class holds all configuration parameters related to batch processing,
+ used by the LLM context manager for batch operations.
+
+ Args:
+ batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
+ batch_check_interval: How often to check batch status, in seconds.
+ delete_successful_batch_files: Whether to delete batch files after successful processing.
+ delete_failed_batch_files: Whether to delete batch files after failed processing.
+ """
+
+ batch_size: Optional[int] = None
+ batch_check_interval: int = 60
+ delete_successful_batch_files: bool = True
+ delete_failed_batch_files: bool = False
+
+
+class BatchContext:
+ """Context manager for batch processing.
+
+ This class provides a context manager interface for batch processing with LLM instances.
+ It handles the setup and teardown of batch processing configuration, ensuring proper
+ state management of the LLM instance.
+
+ Example:
+ ```python
+ from bespokelabs.curator.llm import LLM, batch
+
+ llm = LLM(...)
+ with batch(llm, batch_size=100):
+ results = llm(dataset)
+ ```
+ """
+
+ def __init__(self, llm: "LLM", **kwargs):
+ """Initialize batch context.
+
+ Args:
+ llm: The LLM instance to use for batch processing.
+ **kwargs: Batch configuration parameters passed to BatchConfig.
+ """
+ self.llm = llm
+ self.config = BatchConfig(**kwargs)
+ self._original_processor = None
+
+ def __enter__(self):
+ """Enter batch context.
+
+ Returns:
+ The LLM instance configured for batch processing.
+
+ Raises:
+ RuntimeError: If already in a batch context.
+ """
+ if hasattr(self.llm, "_batch_config") and self.llm._batch_config is not None:
+ raise RuntimeError("Already in batch context")
+ self.llm._batch_config = self.config
+ self.llm._setup_request_processor()
+ return self.llm
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Exit batch context and restore original request processor."""
+ self.llm._batch_config = None
+ if self.llm._original_request_processor is not None:
+ self.llm._request_processor = self.llm._original_request_processor
+
+
+def batch(llm: "LLM", **kwargs) -> BatchContext:
+ """Create a batch processing context for an LLM instance.
+
+ This function creates a context manager that configures an LLM instance for
+ batch processing. The batch processing configuration is active only within
+ the context manager's scope.
+
+ Args:
+ llm: The LLM instance to configure for batch processing.
+ **kwargs: Configuration parameters for batch processing.
+ See BatchConfig for available parameters.
+
+ Returns:
+ A BatchContext instance that can be used as a context manager.
+
+ Example:
+ ```python
+ from bespokelabs.curator.llm import LLM, batch
+
+ llm = LLM(...)
+ with batch(llm, batch_size=100):
+ results = llm(dataset)
+ ```
+ """
+ return BatchContext(llm, **kwargs)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index ca8e24fd..5df35b5e 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -3,7 +3,6 @@
import inspect
import logging
import os
-from dataclasses import dataclass
from datetime import datetime
from io import BytesIO
from typing import Any, Callable, Dict, Iterable, Optional, Type, TypeVar, Union
@@ -14,6 +13,7 @@
from xxhash import xxh64
from bespokelabs.curator.db import MetadataDB
+from bespokelabs.curator.llm.batch import BatchConfig
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.base_request_processor import (
BaseRequestProcessor,
@@ -34,26 +34,6 @@
logger = logger = logging.getLogger(__name__)
-@dataclass
-class BatchConfig:
- """Configuration for batch processing in LLM.
-
- This class holds all configuration parameters related to batch processing,
- used by the LLM context manager for batch operations.
-
- Args:
- batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
- batch_check_interval: How often to check batch status, in seconds.
- delete_successful_batch_files: Whether to delete batch files after successful processing.
- delete_failed_batch_files: Whether to delete batch files after failed processing.
- """
-
- batch_size: Optional[int] = None
- batch_check_interval: int = 60
- delete_successful_batch_files: bool = True
- delete_failed_batch_files: bool = False
-
-
class LLM:
"""Interface for prompting LLMs."""
@@ -122,12 +102,6 @@ def __init__(
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
- # Deprecated parameters
- batch: bool = False,
- batch_size: Optional[int] = None,
- batch_check_interval: Optional[int] = 60,
- delete_successful_batch_files: bool = True,
- delete_failed_batch_files: bool = False,
):
"""Initialize a LLM.
@@ -146,25 +120,7 @@ def __init__(
top_p: The top_p to use for the LLM
presence_penalty: The presence_penalty to use for the LLM
frequency_penalty: The frequency_penalty to use for the LLM
-
- Deprecated Args:
- batch: Whether to use batch processing (deprecated, use context manager instead)
- batch_size: The size of the batch to use (deprecated, use context manager instead)
- batch_check_interval: The interval to check for batch completions (deprecated)
- delete_successful_batch_files: Whether to delete successful batch files (deprecated)
- delete_failed_batch_files: Whether to delete failed batch files (deprecated)
"""
- import warnings
-
- if batch or batch_size is not None:
- warnings.warn(
- "The 'batch' and batch-related parameters are deprecated. "
- "Please use the context manager instead:\n"
- "with llm.batch(batch_size=N, batch_check_interval=X): ...",
- DeprecationWarning,
- stacklevel=2,
- )
-
self.prompt_formatter = PromptFormatter(
model_name, prompt_func, parse_func, response_format
)
@@ -186,15 +142,6 @@ def __init__(
else:
self.backend = self._determine_backend(model_name, response_format)
- # If using deprecated batch parameters, set up batch config
- if batch or batch_size is not None:
- self._batch_config = BatchConfig(
- batch_size=batch_size,
- batch_check_interval=batch_check_interval,
- delete_successful_batch_files=delete_successful_batch_files,
- delete_failed_batch_files=delete_failed_batch_files,
- )
-
# Initialize request processor
self._setup_request_processor(
max_requests_per_minute=max_requests_per_minute,
@@ -210,7 +157,10 @@ def _setup_request_processor(
This method initializes the request processor based on the current configuration,
including batch mode settings if a batch context is active. It handles both
- OpenAI and LiteLLM backends, with appropriate warnings for unsupported configurations.
+ OpenAI and LiteLLM backends, with appropriate processor initialization.
+
+ The batch configuration is managed by the external BatchContext class, which
+ sets self._batch_config when entering the context and clears it when exiting.
Args:
max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
@@ -220,7 +170,7 @@ def _setup_request_processor(
if hasattr(self, "_request_processor"):
self._original_request_processor = self._request_processor
- # Check if we're in batch mode (either via context manager or deprecated params)
+ # Check if we're in batch mode via external BatchContext
is_batch_mode = self._batch_config is not None
# If we already have a batch processor of the same type, keep it to maintain state
@@ -274,58 +224,6 @@ def _setup_request_processor(
else:
raise ValueError(f"Unknown backend: {self.backend}")
- def batch(
- self,
- batch_size: Optional[int] = None,
- batch_check_interval: int = 60,
- delete_successful_batch_files: bool = True,
- delete_failed_batch_files: bool = False,
- ) -> "LLM":
- """Create a context manager for batch processing.
-
- Args:
- batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
- batch_check_interval: How often to check batch status, in seconds.
- delete_successful_batch_files: Whether to delete batch files after successful processing.
- delete_failed_batch_files: Whether to delete batch files after failed processing.
-
- Returns:
- self: The LLM instance configured for batch processing.
-
- Example:
- ```python
- llm = LLM(...)
- with llm.batch(batch_size=100):
- results = llm(dataset)
- ```
- """
- self._batch_config = BatchConfig(
- batch_size=batch_size,
- batch_check_interval=batch_check_interval,
- delete_successful_batch_files=delete_successful_batch_files,
- delete_failed_batch_files=delete_failed_batch_files,
- )
- return self
-
- def __enter__(self):
- """Enter batch context.
-
- Raises:
- RuntimeError: If already in a batch context or if batch() wasn't called.
- """
- if self._batch_config is None:
- raise RuntimeError("Must use 'with llm.batch()' to enter batch context")
- if self._original_request_processor is not None:
- raise RuntimeError("Already in batch context")
- self._setup_request_processor()
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- """Exit batch context and restore original request processor."""
- self._batch_config = None
- if self._original_request_processor is not None:
- self._request_processor = self._original_request_processor
-
def __call__(
self,
dataset: Optional[Iterable] = None,
diff --git a/tests/batch/simple_batch.py b/tests/batch/simple_batch.py
index 0a166658..9ad767e7 100644
--- a/tests/batch/simple_batch.py
+++ b/tests/batch/simple_batch.py
@@ -1,4 +1,5 @@
from bespokelabs.curator import LLM
+from bespokelabs.curator.llm.batch import batch
from datasets import Dataset
import logging
import argparse
@@ -19,7 +20,8 @@ def main(args):
response_format=None,
)
- with prompter.batch(
+ with batch(
+ prompter,
batch_size=args.batch_size,
batch_check_interval=args.batch_check_interval,
):
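The same flow with the relocated, top-level context manager from this patch (dataset contents are placeholders):
```python
from datasets import Dataset
from bespokelabs.curator import LLM
from bespokelabs.curator.llm.batch import batch

llm = LLM(
    model_name="gpt-3.5-turbo",
    prompt_func=lambda row: row["prompt"],
)
dataset = Dataset.from_dict({"prompt": ["Write a haiku about batching."]})

# batch() returns a BatchContext; entering it sets llm._batch_config and swaps
# in the batch request processor, and exiting restores the online processor.
with batch(llm, batch_size=100, batch_check_interval=60):
    results = llm(dataset)
```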
From 1042e23d81560d5a1123486fe0579af1f41eb113 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 09:52:52 -0800
Subject: [PATCH 05/34] set default max attempts to 10
---
.../base_online_request_processor.py | 26 ++++++++++++-------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index 19032f75..e6650d9b 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -22,8 +22,9 @@
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
-DEFAULT_REQUESTS_PER_MINUTE = 100
-DEFAULT_TOKENS_PER_MINUTE = 100_000
+DEFAULT_MAX_REQUESTS_PER_MINUTE = 100
+DEFAULT_MAX_TOKENS_PER_MINUTE = 100_000
+DEFAULT_MAX_RETRIES = 10
@dataclass
@@ -134,13 +135,18 @@ def __init__(
self.prompt_formatter: Optional[PromptFormatter] = None
self.max_requests_per_minute: Optional[int] = max_requests_per_minute
self.max_tokens_per_minute: Optional[int] = max_tokens_per_minute
- self.DEFAULT_MAX_REQUESTS_PER_MINUTE = DEFAULT_REQUESTS_PER_MINUTE
- self.DEFAULT_MAX_TOKENS_PER_MINUTE = DEFAULT_TOKENS_PER_MINUTE
def get_rate_limit(self, name, header_value):
"""Uses manual values if set, otherwise uses headers if available, and if not available uses defaults."""
- manual_value = getattr(self, name)
- default_value = getattr(self, f"DEFAULT_{name.upper()}")
+ if name == "max_requests_per_minute":
+ manual_value = self.max_requests_per_minute
+ default_value = DEFAULT_MAX_REQUESTS_PER_MINUTE
+ elif name == "max_tokens_per_minute":
+ manual_value = self.max_tokens_per_minute
+ default_value = DEFAULT_MAX_TOKENS_PER_MINUTE
+ else:
+ raise ValueError(f"Invalid rate limit name: {name}")
+
if manual_value is not None:
logger.info(f"Manually set {name} to {manual_value}")
return manual_value
@@ -213,7 +219,7 @@ def run(
self.process_requests_from_file(
generic_request_filepath=request_file,
save_filepath=response_file,
- max_attempts=5,
+ max_attempts=DEFAULT_MAX_RETRIES,
resume=True,
)
)
@@ -380,10 +386,10 @@ async def process_requests_from_file(
token_estimate = self.estimate_total_tokens(
retry_request.generic_request.messages
)
- attempt_number = 6 - retry_request.attempts_left
+ attempt_number = DEFAULT_RETRIES + 1 - retry_request.attempts_left
logger.info(
f"Processing retry for request {retry_request.task_id} "
- f"(attempt #{attempt_number} of 5). "
+ f"(attempt #{attempt_number} of {DEFAULT_RETRIES}). "
f"Previous errors: {retry_request.result}"
)
@@ -472,7 +478,7 @@ async def handle_single_request_with_retries(
retry_queue.put_nowait(request)
else:
logger.error(
- f"Request {request.task_id} failed permanently after exhausting all 5 retry attempts. "
+ f"Request {request.task_id} failed permanently after exhausting all {DEFAULT_MAX_RETRIES} retry attempts. "
f"Errors: {[str(e) for e in request.result]}"
)
generic_response = GenericResponse(
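For clarity, the precedence encoded in `get_rate_limit` reduces to the following standalone sketch; the function name and the final assertion are illustrative, and logging and header parsing are omitted:
```python
from typing import Optional

DEFAULT_MAX_REQUESTS_PER_MINUTE = 100
DEFAULT_MAX_TOKENS_PER_MINUTE = 100_000

def resolve_rate_limit(manual_value: Optional[int],
                       header_value: Optional[int],
                       default_value: int) -> int:
    # Manual override wins, then the value reported in the API's rate-limit
    # headers, then the library default.
    if manual_value is not None:
        return manual_value
    if header_value is not None:
        return header_value
    return default_value

# No manual override, header reports 500 requests per minute -> 500.
assert resolve_rate_limit(None, 500, DEFAULT_MAX_REQUESTS_PER_MINUTE) == 500
```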
From d642227577e6ac26a2a63892913e3068f8b5ead4 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 09:53:32 -0800
Subject: [PATCH 06/34] update logging
---
.../request_processor/base_online_request_processor.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index e6650d9b..895d59e5 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -386,10 +386,10 @@ async def process_requests_from_file(
token_estimate = self.estimate_total_tokens(
retry_request.generic_request.messages
)
- attempt_number = DEFAULT_RETRIES + 1 - retry_request.attempts_left
+ attempt_number = DEFAULT_MAX_RETRIES + 1 - retry_request.attempts_left
logger.info(
f"Processing retry for request {retry_request.task_id} "
- f"(attempt #{attempt_number} of {DEFAULT_RETRIES}). "
+ f"(attempt #{attempt_number} of {DEFAULT_MAX_RETRIES}). "
f"Previous errors: {retry_request.result}"
)
From fe5c123b849d719b6bf0d73e98c786942e660e98 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 10:06:37 -0800
Subject: [PATCH 07/34] add a strict check on requiring all responses
---
.../base_online_request_processor.py | 5 +--
.../base_request_processor.py | 36 ++++++++++++++-----
.../litellm_online_request_processor.py | 2 ++
.../openai_batch_request_processor.py | 3 +-
.../openai_online_request_processor.py | 2 ++
5 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index 895d59e5..f4d9691b 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -125,8 +125,9 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ require_all_responses: bool = False,
):
- super().__init__(batch_size=None)
+ super().__init__(batch_size=None, require_all_responses=require_all_responses)
self.model: str = model
self.temperature: float | None = temperature
self.top_p: float | None = top_p
@@ -386,7 +387,7 @@ async def process_requests_from_file(
token_estimate = self.estimate_total_tokens(
retry_request.generic_request.messages
)
- attempt_number = DEFAULT_MAX_RETRIES + 1 - retry_request.attempts_left
+ attempt_number = 1 + DEFAULT_MAX_RETRIES - retry_request.attempts_left
logger.info(
f"Processing retry for request {retry_request.task_id} "
f"(attempt #{attempt_number} of {DEFAULT_MAX_RETRIES}). "
diff --git a/src/bespokelabs/curator/request_processor/base_request_processor.py b/src/bespokelabs/curator/request_processor/base_request_processor.py
index b5163e34..ca79b9e1 100644
--- a/src/bespokelabs/curator/request_processor/base_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_request_processor.py
@@ -29,8 +29,9 @@ class BaseRequestProcessor(ABC):
Base class for all request processors.
"""
- def __init__(self, batch_size: Optional[int] = None):
+ def __init__(self, batch_size: Optional[int] = None, require_all_responses: bool = False):
self.batch_size = batch_size
+ self.require_all_responses = require_all_responses
# Increase the number of open file descriptors to avoid "Too many open files" errors
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
desired_limit = min(10_000_000, hard)
@@ -216,9 +217,6 @@ def create_dataset_files(
Returns:
Dataset: Completed dataset
"""
- total_responses_count = 0
- failed_responses_count = 0
-
responses_files = glob.glob(f"{working_dir}/responses_*.jsonl")
if len(responses_files) == 0:
raise ValueError(f"No responses files found in {working_dir}")
@@ -230,6 +228,8 @@ def create_dataset_files(
)
# Process all response files
+ total_responses_count = 0
+ failed_responses_count = 0
dataset_file = f"{working_dir}/{parse_func_hash}.arrow"
with ArrowWriter(path=dataset_file) as writer:
for responses_file in responses_files:
@@ -319,14 +319,34 @@ def create_dataset_files(
writer.write(row)
- logger.info(f"Read {total_responses_count} responses, {failed_responses_count} failed")
+ logger.info("Finalizing writer")
+ writer.finalize()
+
+ logger.info(f"Read {total_responses_count} responses.")
if failed_responses_count == total_responses_count:
os.remove(dataset_file)
raise ValueError("All requests failed")
+ if failed_responses_count > 0:
+ logger.warning(f"{failed_responses_count} requests failed.")
+
+ if self.require_all_responses:
+ # all responses succeeded
+ if failed_responses_count > 0:
+ os.remove(dataset_file)
+ raise ValueError(
+ f"{failed_responses_count} requests failed and require_all_responses is True"
+ )
- logger.info("Finalizing writer")
-
- writer.finalize()
+ # number of responses matches number of requests
+ request_files = glob.glob(f"{working_dir}/requests_*.jsonl")
+ n_requests = 0
+ for request_file in request_files:
+ n_requests += len(open(request_file, "r").readlines())
+ if n_requests != total_responses_count:
+ os.remove(dataset_file)
+ raise ValueError(
+ f"Some requests do not have responses and require_all_responses is True. n_requests is {n_requests} and n_responses is {total_responses_count}"
+ )
return Dataset.from_file(dataset_file)
diff --git a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
index 9c4131c3..6a8f8c81 100644
--- a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
@@ -49,6 +49,7 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ require_all_responses: bool = False,
):
super().__init__(
model=model,
@@ -58,6 +59,7 @@ def __init__(
frequency_penalty=frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
+ require_all_responses=require_all_responses,
)
self.client = instructor.from_litellm(litellm.acompletion)
diff --git a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
index 202c214c..37be0c95 100644
--- a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
@@ -47,6 +47,7 @@ def __init__(
url: str = "https://api.openai.com/v1/chat/completions",
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
+ require_all_responses: bool = False,
):
if batch_size > MAX_REQUESTS_PER_BATCH:
raise ValueError(
@@ -54,7 +55,7 @@ def __init__(
f"{MAX_REQUESTS_PER_BATCH:,} requests per batch that OpenAI supports. "
f"Please set your batch_size to be less than or equal to {MAX_REQUESTS_PER_BATCH:,}."
)
- super().__init__(batch_size)
+ super().__init__(batch_size, require_all_responses=require_all_responses)
self.model = model
self.url: str = url
self.check_interval: int = batch_check_interval
diff --git a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
index 7501d0a1..c53454b5 100644
--- a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
@@ -81,6 +81,7 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ require_all_responses: bool = False,
):
super().__init__(
model=model,
@@ -90,6 +91,7 @@ def __init__(
frequency_penalty=frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
+ require_all_responses=require_all_responses,
)
self.url = url
self.api_key = api_key
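The strict check added to `create_dataset_files` boils down to the following sketch; the helper name is illustrative, and file cleanup and logging are omitted:
```python
def check_responses(n_requests: int,
                    n_responses: int,
                    n_failed: int,
                    require_all_responses: bool) -> None:
    # If every response failed, the run is unusable regardless of strictness.
    if n_failed == n_responses:
        raise ValueError("All requests failed")
    if require_all_responses:
        # Strict mode: any failed response or any request without a response aborts.
        if n_failed > 0:
            raise ValueError(f"{n_failed} requests failed and require_all_responses is True")
        if n_requests != n_responses:
            raise ValueError(
                "Some requests do not have responses and require_all_responses is True. "
                f"n_requests is {n_requests} and n_responses is {n_responses}"
            )
```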
From 1a60a89a29344586507a125419bf6856ebc4834b Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 10:09:40 -0800
Subject: [PATCH 08/34] update logging
---
.../base_request_processor.py | 27 +++++++++----------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_request_processor.py b/src/bespokelabs/curator/request_processor/base_request_processor.py
index ca79b9e1..aef8ac59 100644
--- a/src/bespokelabs/curator/request_processor/base_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_request_processor.py
@@ -326,26 +326,25 @@ def create_dataset_files(
if failed_responses_count == total_responses_count:
os.remove(dataset_file)
raise ValueError("All requests failed")
+
if failed_responses_count > 0:
logger.warning(f"{failed_responses_count} requests failed.")
-
- if self.require_all_responses:
- # all responses succeeded
- if failed_responses_count > 0:
+ if self.require_all_responses:
os.remove(dataset_file)
- raise ValueError(
- f"{failed_responses_count} requests failed and require_all_responses is True"
- )
+ raise ValueError(f"Some requests failed and require_all_responses is True")
+
+ # number of responses matches number of requests
+ request_files = glob.glob(f"{working_dir}/requests_*.jsonl")
+ n_requests = 0
+ for request_file in request_files:
+ n_requests += len(open(request_file, "r").readlines())
- # number of responses matches number of requests
- request_files = glob.glob(f"{working_dir}/requests_*.jsonl")
- n_requests = 0
- for request_file in request_files:
- n_requests += len(open(request_file, "r").readlines())
- if n_requests != total_responses_count:
+ if n_requests != total_responses_count:
+ logger.warning(f"{n_requests - total_responses_count} requests do not have responses. n_requests is {n_requests} and n_responses is {total_responses_count}")
+ if self.require_all_responses:
os.remove(dataset_file)
raise ValueError(
- f"Some requests do not have responses and require_all_responses is True. n_requests is {n_requests} and n_responses is {total_responses_count}"
+ f"Some requests do not have responses and require_all_responses is True."
)
return Dataset.from_file(dataset_file)
From 44bb6c06440955e12a8dae45c99dbe339a3d244f Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 10:21:01 -0800
Subject: [PATCH 09/34] fast line counting in python
---
.../request_processor/base_request_processor.py | 9 ++++++---
.../openai_batch_request_processor.py | 4 ++--
src/bespokelabs/curator/utils.py | 12 ++++++++++++
3 files changed, 20 insertions(+), 5 deletions(-)
create mode 100644 src/bespokelabs/curator/utils.py
diff --git a/src/bespokelabs/curator/request_processor/base_request_processor.py b/src/bespokelabs/curator/request_processor/base_request_processor.py
index aef8ac59..e278ab7b 100644
--- a/src/bespokelabs/curator/request_processor/base_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_request_processor.py
@@ -14,6 +14,7 @@
from datasets.arrow_writer import ArrowWriter
from pydantic import BaseModel, ValidationError
+from bespokelabs.curator.utils import count_lines
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.event_loop import run_in_event_loop
from bespokelabs.curator.request_processor.generic_request import GenericRequest
@@ -326,7 +327,7 @@ def create_dataset_files(
if failed_responses_count == total_responses_count:
os.remove(dataset_file)
raise ValueError("All requests failed")
-
+
if failed_responses_count > 0:
logger.warning(f"{failed_responses_count} requests failed.")
if self.require_all_responses:
@@ -337,10 +338,12 @@ def create_dataset_files(
request_files = glob.glob(f"{working_dir}/requests_*.jsonl")
n_requests = 0
for request_file in request_files:
- n_requests += len(open(request_file, "r").readlines())
+ n_requests += count_lines(request_file)
if n_requests != total_responses_count:
- logger.warning(f"{n_requests - total_responses_count} requests do not have responses. n_requests is {n_requests} and n_responses is {total_responses_count}")
+ logger.warning(
+ f"{n_requests - total_responses_count} requests do not have responses. n_requests is {n_requests} and n_responses is {total_responses_count}"
+ )
if self.require_all_responses:
os.remove(dataset_file)
raise ValueError(
diff --git a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
index 37be0c95..fc679501 100644
--- a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
@@ -12,6 +12,7 @@
from openai.types import Batch
from tqdm import tqdm
+from bespokelabs.curator.utils import count_lines
from bespokelabs.curator.dataset import Dataset
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.base_request_processor import (
@@ -776,8 +777,7 @@ async def track_already_submitted_batches(self):
# Edge case where the batch is still validating, and we need to know the total number of requests
if batch_object.status == "validating":
- n_requests = len(open(request_file_name, "r").readlines())
- batch_object.request_counts.total = n_requests
+ batch_object.request_counts.total = count_lines(request_file_name)
else:
n_requests = batch_object.request_counts.total
diff --git a/src/bespokelabs/curator/utils.py b/src/bespokelabs/curator/utils.py
new file mode 100644
index 00000000..24cfa204
--- /dev/null
+++ b/src/bespokelabs/curator/utils.py
@@ -0,0 +1,12 @@
+# https://stackoverflow.com/questions/845058/how-to-get-the-line-count-of-a-large-file-cheaply-in-python
+def _file_gen(reader):
+ b = reader(1024 * 1024)
+ while b:
+ yield b
+ b = reader(1024 * 1024)
+
+
+def count_lines(filename):
+ f = open(filename, "rb")
+ f_gen = _file_gen(f.raw.read)
+ return sum(buf.count(b"\n") for buf in f_gen)
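A usage sketch of the new helper; the file path is a placeholder:
```python
from bespokelabs.curator.utils import count_lines

# Request files are JSONL with one request per line, so the line count is
# the number of requests in the file.
n_requests = count_lines("requests_0.jsonl")  # placeholder path
print(f"{n_requests} requests found")
```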
From 12da38eaf445e8e6dcc066b7b0e6c4af3ef1ebc5 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 10:27:32 -0800
Subject: [PATCH 10/34] move line count to another file
---
src/bespokelabs/curator/utils.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/bespokelabs/curator/utils.py b/src/bespokelabs/curator/utils.py
index 24cfa204..444c324d 100644
--- a/src/bespokelabs/curator/utils.py
+++ b/src/bespokelabs/curator/utils.py
@@ -1,4 +1,5 @@
# https://stackoverflow.com/questions/845058/how-to-get-the-line-count-of-a-large-file-cheaply-in-python
+# https://stackoverflow.com/a/68385697
def _file_gen(reader):
b = reader(1024 * 1024)
while b:
@@ -6,6 +7,7 @@ def _file_gen(reader):
b = reader(1024 * 1024)
+# Instead of counting lines, we could store a metadata file that records the number of requests in each file
def count_lines(filename):
f = open(filename, "rb")
f_gen = _file_gen(f.raw.read)
From e7ab3510f51a88359af3067c4fdf6395adf62823 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 13:03:29 -0800
Subject: [PATCH 11/34] add max retries arg
---
src/bespokelabs/curator/llm/llm.py | 6 ++++++
.../base_online_request_processor.py | 16 ++++++++++------
.../openai_batch_request_processor.py | 12 ++++++++++--
tests/simple_online.py | 2 ++
4 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index 5df35b5e..9cab6c89 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -102,6 +102,7 @@ def __init__(
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
+ max_retries: Optional[int] = None,
):
"""Initialize a LLM.
@@ -146,12 +147,14 @@ def __init__(
self._setup_request_processor(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
+ max_retries=max_retries,
)
def _setup_request_processor(
self,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ max_retries: Optional[int] = None,
):
"""Set up the appropriate request processor based on current config.
@@ -196,6 +199,7 @@ def _setup_request_processor(
frequency_penalty=self.frequency_penalty,
delete_successful_batch_files=self._batch_config.delete_successful_batch_files,
delete_failed_batch_files=self._batch_config.delete_failed_batch_files,
+ max_retries=max_retries,
)
elif self.backend == "openai":
self._request_processor = OpenAIOnlineRequestProcessor(
@@ -206,6 +210,7 @@ def _setup_request_processor(
frequency_penalty=self.frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
+ max_retries=max_retries,
)
elif self.backend == "litellm":
if is_batch_mode:
@@ -220,6 +225,7 @@ def _setup_request_processor(
frequency_penalty=self.frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
+ max_retries=max_retries,
)
else:
raise ValueError(f"Unknown backend: {self.backend}")
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index f4d9691b..a46df943 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -125,9 +125,9 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
- require_all_responses: bool = False,
+ max_retries: Optional[int] = None,
):
- super().__init__(batch_size=None, require_all_responses=require_all_responses)
+ super().__init__(batch_size=None)
self.model: str = model
self.temperature: float | None = temperature
self.top_p: float | None = top_p
@@ -136,6 +136,10 @@ def __init__(
self.prompt_formatter: Optional[PromptFormatter] = None
self.max_requests_per_minute: Optional[int] = max_requests_per_minute
self.max_tokens_per_minute: Optional[int] = max_tokens_per_minute
+ if max_retries is None:
+ self.max_retries = DEFAULT_MAX_RETRIES
+ else:
+ self.max_retries = max_retries
def get_rate_limit(self, name, header_value):
"""Uses manual values if set, otherwise uses headers if available, and if not available uses defaults."""
@@ -220,7 +224,7 @@ def run(
self.process_requests_from_file(
generic_request_filepath=request_file,
save_filepath=response_file,
- max_attempts=DEFAULT_MAX_RETRIES,
+ max_attempts=self.max_retries,
resume=True,
)
)
@@ -387,10 +391,10 @@ async def process_requests_from_file(
token_estimate = self.estimate_total_tokens(
retry_request.generic_request.messages
)
- attempt_number = 1 + DEFAULT_MAX_RETRIES - retry_request.attempts_left
+ attempt_number = 1 + self.max_retries - retry_request.attempts_left
logger.info(
f"Processing retry for request {retry_request.task_id} "
- f"(attempt #{attempt_number} of {DEFAULT_MAX_RETRIES}). "
+ f"(attempt #{attempt_number} of {self.max_retries}). "
f"Previous errors: {retry_request.result}"
)
@@ -479,7 +483,7 @@ async def handle_single_request_with_retries(
retry_queue.put_nowait(request)
else:
logger.error(
- f"Request {request.task_id} failed permanently after exhausting all {DEFAULT_MAX_RETRIES} retry attempts. "
+ f"Request {request.task_id} failed permanently after exhausting all {self.max_retries} retry attempts. "
f"Errors: {[str(e) for e in request.result]}"
)
generic_response = GenericResponse(
diff --git a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
index fc679501..7588a852 100644
--- a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
@@ -5,7 +5,7 @@
import logging
import os
from dataclasses import dataclass, field
-from typing import Callable
+from typing import Callable, Optional
import litellm
from openai import AsyncOpenAI, NotFoundError
@@ -49,6 +49,7 @@ def __init__(
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
require_all_responses: bool = False,
+ max_retries: Optional[int] = None,
):
if batch_size > MAX_REQUESTS_PER_BATCH:
raise ValueError(
@@ -66,6 +67,10 @@ def __init__(
self.frequency_penalty: float | None = frequency_penalty
self.delete_successful_batch_files: bool = delete_successful_batch_files
self.delete_failed_batch_files: bool = delete_failed_batch_files
+ if max_retries is None:
+ self.max_retries = MAX_RETRIES_PER_OPERATION
+ else:
+ self.max_retries = max_retries
def get_rate_limits(self) -> dict:
"""
@@ -337,6 +342,7 @@ def run(
prompt_formatter,
delete_successful_batch_files=self.delete_successful_batch_files,
delete_failed_batch_files=self.delete_failed_batch_files,
+ max_retries=self.max_retries,
)
run_in_event_loop(self.run_batch_operations(batch_manager, request_files))
@@ -355,6 +361,7 @@ def cancel_batches(self, working_dir: str) -> Dataset:
self.check_interval,
delete_successful_batch_files=self.delete_successful_batch_files,
delete_failed_batch_files=self.delete_failed_batch_files,
+ max_retries=self.max_retries,
)
run_in_event_loop(batch_manager.cancel_batches())
@@ -512,6 +519,7 @@ def __init__(
prompt_formatter: PromptFormatter | None = None,
delete_successful_batch_files: bool = False,
delete_failed_batch_files: bool = False,
+ max_retries: Optional[int] = None,
) -> None:
"""Initialize BatchManager to handle OpenAI batch processing operations.
@@ -525,7 +533,7 @@ def __init__(
delete_failed_batch_files (bool): Whether to delete input/error files from OpenAI
after batch failure.
"""
- self.client = AsyncOpenAI(max_retries=MAX_RETRIES_PER_OPERATION)
+ self.client = AsyncOpenAI(max_retries=max_retries)
self.check_interval = check_interval
self.working_dir = working_dir
self.tracker = BatchStatusTracker()
diff --git a/tests/simple_online.py b/tests/simple_online.py
index fd850592..601c280f 100644
--- a/tests/simple_online.py
+++ b/tests/simple_online.py
@@ -18,6 +18,7 @@ def main(args):
model_name=args.model,
max_requests_per_minute=args.max_requests_per_minute,
max_tokens_per_minute=args.max_tokens_per_minute,
+ max_retries=args.max_retries,
)
dataset = prompter(dataset, batch_cancel=args.cancel)
@@ -41,5 +42,6 @@ def main(args):
parser.add_argument(
"--max-tokens-per-minute", type=int, help="Max tokens per minute", default=None
)
+ parser.add_argument("--max-retries", type=int, help="Max retries", default=None)
args = parser.parse_args()
main(args)
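Wiring the new argument through the public API looks like this; the model name and prompt function are placeholders, and when `max_retries` is None the online processor falls back to DEFAULT_MAX_RETRIES (10):
```python
from bespokelabs.curator import LLM

llm = LLM(
    model_name="gpt-4o-mini",               # placeholder model
    prompt_func=lambda row: row["prompt"],
    max_retries=3,                          # overrides the default of 10 retries
)
```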
From 3a2bc17f9fdd5e9a9b00817bd0f0d90492a2039c Mon Sep 17 00:00:00 2001
From: Mahesh Sathiamoorthy
Date: Fri, 13 Dec 2024 16:11:59 -0500
Subject: [PATCH 12/34] Revert "Refactor LLM class to use context manager for
batch parameters"
---
src/bespokelabs/curator/llm/batch.py | 104 ------------------
src/bespokelabs/curator/llm/llm.py | 152 +++++++++++----------------
tests/batch/simple_batch.py | 13 +--
3 files changed, 66 insertions(+), 203 deletions(-)
delete mode 100644 src/bespokelabs/curator/llm/batch.py
diff --git a/src/bespokelabs/curator/llm/batch.py b/src/bespokelabs/curator/llm/batch.py
deleted file mode 100644
index a01cc79c..00000000
--- a/src/bespokelabs/curator/llm/batch.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""Batch processing functionality for LLM."""
-
-from dataclasses import dataclass
-from typing import Optional, TYPE_CHECKING
-
-if TYPE_CHECKING:
- from .llm import LLM
-
-
-@dataclass
-class BatchConfig:
- """Configuration for batch processing in LLM.
-
- This class holds all configuration parameters related to batch processing,
- used by the LLM context manager for batch operations.
-
- Args:
- batch_size: Maximum number of requests per batch. If None, defaults to 1,000.
- batch_check_interval: How often to check batch status, in seconds.
- delete_successful_batch_files: Whether to delete batch files after successful processing.
- delete_failed_batch_files: Whether to delete batch files after failed processing.
- """
-
- batch_size: Optional[int] = None
- batch_check_interval: int = 60
- delete_successful_batch_files: bool = True
- delete_failed_batch_files: bool = False
-
-
-class BatchContext:
- """Context manager for batch processing.
-
- This class provides a context manager interface for batch processing with LLM instances.
- It handles the setup and teardown of batch processing configuration, ensuring proper
- state management of the LLM instance.
-
- Example:
- ```python
- from bespokelabs.curator.llm import LLM, batch
-
- llm = LLM(...)
- with batch(llm, batch_size=100):
- results = llm(dataset)
- ```
- """
-
- def __init__(self, llm: "LLM", **kwargs):
- """Initialize batch context.
-
- Args:
- llm: The LLM instance to use for batch processing.
- **kwargs: Batch configuration parameters passed to BatchConfig.
- """
- self.llm = llm
- self.config = BatchConfig(**kwargs)
- self._original_processor = None
-
- def __enter__(self):
- """Enter batch context.
-
- Returns:
- The LLM instance configured for batch processing.
-
- Raises:
- RuntimeError: If already in a batch context.
- """
- if hasattr(self.llm, "_batch_config") and self.llm._batch_config is not None:
- raise RuntimeError("Already in batch context")
- self.llm._batch_config = self.config
- self.llm._setup_request_processor()
- return self.llm
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- """Exit batch context and restore original request processor."""
- self.llm._batch_config = None
- if self.llm._original_request_processor is not None:
- self.llm._request_processor = self.llm._original_request_processor
-
-
-def batch(llm: "LLM", **kwargs) -> BatchContext:
- """Create a batch processing context for an LLM instance.
-
- This function creates a context manager that configures an LLM instance for
- batch processing. The batch processing configuration is active only within
- the context manager's scope.
-
- Args:
- llm: The LLM instance to configure for batch processing.
- **kwargs: Configuration parameters for batch processing.
- See BatchConfig for available parameters.
-
- Returns:
- A BatchContext instance that can be used as a context manager.
-
- Example:
- ```python
- from bespokelabs.curator.llm import LLM, batch
-
- llm = LLM(...)
- with batch(llm, batch_size=100):
- results = llm(dataset)
- ```
- """
- return BatchContext(llm, **kwargs)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index eec61f2c..59ceb4cc 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -13,7 +13,6 @@
from xxhash import xxh64
from bespokelabs.curator.db import MetadataDB
-from bespokelabs.curator.llm.batch import BatchConfig
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.base_request_processor import (
BaseRequestProcessor,
@@ -83,6 +82,11 @@ def __init__(
backend: Optional[str] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ batch: bool = False,
+ batch_size: Optional[int] = None,
+ batch_check_interval: Optional[int] = 60,
+ delete_successful_batch_files: bool = True,
+ delete_failed_batch_files: bool = False, # To allow users to debug failed batches
temperature: Optional[float] = None,
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
@@ -99,110 +103,76 @@ def __init__(
response_format: A Pydantic model specifying the
response format from the LLM.
backend: The backend to use ("openai" or "litellm"). If None, will be auto-determined
- max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
- max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
- temperature: The temperature to use for the LLM
- top_p: The top_p to use for the LLM
- presence_penalty: The presence_penalty to use for the LLM
- frequency_penalty: The frequency_penalty to use for the LLM
+ batch: Whether to use batch processing
+ batch_size: The size of the batch to use, only used if batch is True
+ batch_check_interval: The interval to check for batch completions, only used if batch is True
+ delete_successful_batch_files: Whether to delete successful batch files, only used if batch is True
+ delete_failed_batch_files: Whether to delete failed batch files, only used if batch is True
+ temperature: The temperature to use for the LLM, only used if batch is False
+ top_p: The top_p to use for the LLM, only used if batch is False
+ presence_penalty: The presence_penalty to use for the LLM, only used if batch is False
+ frequency_penalty: The frequency_penalty to use for the LLM, only used if batch is False
"""
self.prompt_formatter = PromptFormatter(
model_name, prompt_func, parse_func, response_format
)
-
- # Initialize context manager state
- self._batch_config = None
- self._original_request_processor = None
-
- # Store model parameters
- self.temperature = temperature
- self.top_p = top_p
- self.presence_penalty = presence_penalty
- self.frequency_penalty = frequency_penalty
- self.model_name = model_name
+ self.batch_mode = batch
# Auto-determine backend if not specified
+ # Use provided backend or auto-determine based on model and format
if backend is not None:
self.backend = backend
else:
self.backend = self._determine_backend(model_name, response_format)
- # Initialize request processor
- self._setup_request_processor(
- max_requests_per_minute=max_requests_per_minute,
- max_tokens_per_minute=max_tokens_per_minute,
- )
-
- def _setup_request_processor(
- self,
- max_requests_per_minute: Optional[int] = None,
- max_tokens_per_minute: Optional[int] = None,
- ):
- """Set up the appropriate request processor based on current config.
-
- This method initializes the request processor based on the current configuration,
- including batch mode settings if a batch context is active. It handles both
- OpenAI and LiteLLM backends, with appropriate processor initialization.
-
- The batch configuration is managed by the external BatchContext class, which
- sets self._batch_config when entering the context and clears it when exiting.
-
- Args:
- max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
- max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
- """
- # Store current processor before potentially switching to batch mode
- if hasattr(self, "_request_processor"):
- self._original_request_processor = self._request_processor
-
- # Check if we're in batch mode via external BatchContext
- is_batch_mode = self._batch_config is not None
-
- # If we already have a batch processor of the same type, keep it to maintain state
- if (
- is_batch_mode
- and hasattr(self, "_request_processor")
- and isinstance(self._request_processor, OpenAIBatchRequestProcessor)
- ):
- return
-
- if is_batch_mode and self.backend == "openai":
- if max_requests_per_minute is not None or max_tokens_per_minute is not None:
- logger.warning(
- "max_requests_per_minute and max_tokens_per_minute not supported with batch mode"
+ # Select request processor based on backend
+ if self.backend == "openai":
+ if batch:
+ if batch_size is None:
+ batch_size = 1_000
+ logger.info(
+ f"batch=True but no batch_size provided, using default batch_size of {batch_size:,}"
+ )
+ if max_requests_per_minute is not None or max_tokens_per_minute is not None:
+ logger.warning(
+ "max_requests_per_minute and max_tokens_per_minute not supported with batch mode"
+ )
+ self._request_processor = OpenAIBatchRequestProcessor(
+ model=model_name,
+ batch_size=batch_size,
+ temperature=temperature,
+ top_p=top_p,
+ batch_check_interval=batch_check_interval,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ delete_successful_batch_files=delete_successful_batch_files,
+ delete_failed_batch_files=delete_failed_batch_files,
+ )
+ else:
+ if batch_size is not None:
+ logger.warning(
+ f"LLM argument `batch_size` {batch_size} is ignored because `batch` is False"
+ )
+ self._request_processor = OpenAIOnlineRequestProcessor(
+ model=model_name,
+ temperature=temperature,
+ top_p=top_p,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ max_requests_per_minute=max_requests_per_minute,
+ max_tokens_per_minute=max_tokens_per_minute,
)
- self._request_processor = OpenAIBatchRequestProcessor(
- model=self.model_name,
- batch_size=self._batch_config.batch_size or 1_000,
- batch_check_interval=self._batch_config.batch_check_interval,
- temperature=self.temperature,
- top_p=self.top_p,
- presence_penalty=self.presence_penalty,
- frequency_penalty=self.frequency_penalty,
- delete_successful_batch_files=self._batch_config.delete_successful_batch_files,
- delete_failed_batch_files=self._batch_config.delete_failed_batch_files,
- )
- elif self.backend == "openai":
- self._request_processor = OpenAIOnlineRequestProcessor(
- model=self.model_name,
- temperature=self.temperature,
- top_p=self.top_p,
- presence_penalty=self.presence_penalty,
- frequency_penalty=self.frequency_penalty,
- max_requests_per_minute=max_requests_per_minute,
- max_tokens_per_minute=max_tokens_per_minute,
- )
elif self.backend == "litellm":
- if is_batch_mode:
+ if batch:
logger.warning(
- "Batch mode is not supported with LiteLLM backend, ignoring batch context"
+ "Batch mode is not supported with LiteLLM backend, ignoring batch=True"
)
self._request_processor = LiteLLMOnlineRequestProcessor(
- model=self.model_name,
- temperature=self.temperature,
- top_p=self.top_p,
- presence_penalty=self.presence_penalty,
- frequency_penalty=self.frequency_penalty,
+ model=model_name,
+ temperature=temperature,
+ top_p=top_p,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
)
@@ -278,7 +248,7 @@ def _completions(
if self.prompt_formatter.response_format
else "text"
),
- str(self._batch_config is not None),
+ str(self.batch_mode),
str(self.backend),
]
)
@@ -308,7 +278,7 @@ def _completions(
else "text"
),
"run_hash": fingerprint,
- "batch_mode": self._batch_config is not None,
+ "batch_mode": self.batch_mode,
}
metadata_db.store_metadata(metadata_dict)
diff --git a/tests/batch/simple_batch.py b/tests/batch/simple_batch.py
index 9ad767e7..251296ae 100644
--- a/tests/batch/simple_batch.py
+++ b/tests/batch/simple_batch.py
@@ -1,5 +1,4 @@
from bespokelabs.curator import LLM
-from bespokelabs.curator.llm.batch import batch
from datasets import Dataset
import logging
import argparse
@@ -16,16 +15,14 @@ def main(args):
prompter = LLM(
prompt_func=lambda row: row["prompt"],
- model_name="gpt-3.5-turbo",
+ model_name="gpt-4o-mini",
response_format=None,
- )
-
- with batch(
- prompter,
+ batch=True,
batch_size=args.batch_size,
batch_check_interval=args.batch_check_interval,
- ):
- dataset = prompter(dataset, batch_cancel=args.cancel)
+ )
+
+ dataset = prompter(dataset, batch_cancel=args.cancel)
print(dataset.to_pandas())
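With this revert, batch processing is configured through plain constructor arguments on `LLM` again, as the updated `tests/batch/simple_batch.py` hunk above shows. A minimal sketch of that usage, assuming the reverted signature; the model name, prompt, and parameter values here are illustrative and not part of this patch:

```python
from datasets import Dataset

from bespokelabs.curator import LLM

# Toy dataset with a single prompt column, mirroring the test script above.
dataset = Dataset.from_dict({"prompt": ["Write a haiku about batch jobs."]})

prompter = LLM(
    prompt_func=lambda row: row["prompt"],
    model_name="gpt-4o-mini",
    response_format=None,
    batch=True,                # select the OpenAI batch request processor
    batch_size=1_000,          # defaults to 1,000 when omitted in batch mode
    batch_check_interval=60,   # seconds between batch status checks
)

results = prompter(dataset)
print(results.to_pandas())
```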
From f2b99c6c8305767245b15e6f9b9d159c8c3bacc7 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 13:15:10 -0800
Subject: [PATCH 13/34] tests
---
src/bespokelabs/curator/llm/llm.py | 98 ++++++++++---------
.../base_online_request_processor.py | 3 +-
.../litellm_online_request_processor.py | 2 +
.../openai_online_request_processor.py | 2 +
tests/simple_online.py | 7 ++
5 files changed, 67 insertions(+), 45 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index 9cab6c89..c6df56bd 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -37,50 +37,6 @@
class LLM:
"""Interface for prompting LLMs."""
- @staticmethod
- def _determine_backend(
- model_name: str, response_format: Optional[Type[BaseModel]] = None
- ) -> str:
- """Determine which backend to use based on model name and response format.
-
- Args:
- model_name (str): Name of the model
- response_format (Optional[Type[BaseModel]]): Response format if specified
-
- Returns:
- str: Backend to use ("openai" or "litellm")
- """
- model_name = model_name.lower()
-
- # GPT-4o models with response format should use OpenAI
- if (
- response_format
- and OpenAIOnlineRequestProcessor(model_name).check_structured_output_support()
- ):
- logger.info(f"Requesting structured output from {model_name}, using OpenAI backend")
- return "openai"
-
- # GPT models and O1 models without response format should use OpenAI
- if not response_format and any(x in model_name for x in ["gpt-", "o1-preview", "o1-mini"]):
- logger.info(f"Requesting text output from {model_name}, using OpenAI backend")
- return "openai"
-
- # Default to LiteLLM for all other cases
- logger.info(
- f"Requesting {f'structured' if response_format else 'text'} output from {model_name}, using LiteLLM backend"
- )
- return "litellm"
-
- @staticmethod
- def _convert_response_to_dict(response):
- if hasattr(response, "model_dump"):
- return response.model_dump()
- elif isinstance(response, dict):
- return response
- elif hasattr(response, "__dict__"):
- return response.__dict__
- return response
-
def __init__(
self,
model_name: str,
@@ -103,6 +59,7 @@ def __init__(
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
max_retries: Optional[int] = None,
+ require_all_responses: Optional[bool] = None,
):
"""Initialize a LLM.
@@ -121,6 +78,8 @@ def __init__(
top_p: The top_p to use for the LLM
presence_penalty: The presence_penalty to use for the LLM
frequency_penalty: The frequency_penalty to use for the LLM
+ max_retries: The maximum number of retries to use for the LLM
+ require_all_responses: Whether to require all responses
"""
self.prompt_formatter = PromptFormatter(
model_name, prompt_func, parse_func, response_format
@@ -148,13 +107,59 @@ def __init__(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
max_retries=max_retries,
+ require_all_responses=require_all_responses,
)
+ @staticmethod
+ def _determine_backend(
+ model_name: str, response_format: Optional[Type[BaseModel]] = None
+ ) -> str:
+ """Determine which backend to use based on model name and response format.
+
+ Args:
+ model_name (str): Name of the model
+ response_format (Optional[Type[BaseModel]]): Response format if specified
+
+ Returns:
+ str: Backend to use ("openai" or "litellm")
+ """
+ model_name = model_name.lower()
+
+ # GPT-4o models with response format should use OpenAI
+ if (
+ response_format
+ and OpenAIOnlineRequestProcessor(model_name).check_structured_output_support()
+ ):
+ logger.info(f"Requesting structured output from {model_name}, using OpenAI backend")
+ return "openai"
+
+ # GPT models and O1 models without response format should use OpenAI
+ if not response_format and any(x in model_name for x in ["gpt-", "o1-preview", "o1-mini"]):
+ logger.info(f"Requesting text output from {model_name}, using OpenAI backend")
+ return "openai"
+
+ # Default to LiteLLM for all other cases
+ logger.info(
+ f"Requesting {f'structured' if response_format else 'text'} output from {model_name}, using LiteLLM backend"
+ )
+ return "litellm"
+
+ @staticmethod
+ def _convert_response_to_dict(response):
+ if hasattr(response, "model_dump"):
+ return response.model_dump()
+ elif isinstance(response, dict):
+ return response
+ elif hasattr(response, "__dict__"):
+ return response.__dict__
+ return response
+
def _setup_request_processor(
self,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
max_retries: Optional[int] = None,
+ require_all_responses: Optional[bool] = None,
):
"""Set up the appropriate request processor based on current config.
@@ -168,6 +173,8 @@ def _setup_request_processor(
Args:
max_requests_per_minute: Maximum requests per minute (not supported in batch mode)
max_tokens_per_minute: Maximum tokens per minute (not supported in batch mode)
+ max_retries: The maximum number of retries to use for the LLM
+ require_all_responses: Whether to require all responses
"""
# Store current processor before potentially switching to batch mode
if hasattr(self, "_request_processor"):
@@ -200,6 +207,7 @@ def _setup_request_processor(
delete_successful_batch_files=self._batch_config.delete_successful_batch_files,
delete_failed_batch_files=self._batch_config.delete_failed_batch_files,
max_retries=max_retries,
+ require_all_responses=require_all_responses,
)
elif self.backend == "openai":
self._request_processor = OpenAIOnlineRequestProcessor(
@@ -211,6 +219,7 @@ def _setup_request_processor(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
max_retries=max_retries,
+ require_all_responses=require_all_responses,
)
elif self.backend == "litellm":
if is_batch_mode:
@@ -226,6 +235,7 @@ def _setup_request_processor(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
max_retries=max_retries,
+ require_all_responses=require_all_responses,
)
else:
raise ValueError(f"Unknown backend: {self.backend}")
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index a46df943..44eb32c1 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -125,9 +125,10 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
+ require_all_responses: bool = False,
max_retries: Optional[int] = None,
):
- super().__init__(batch_size=None)
+ super().__init__(batch_size=None, require_all_responses=require_all_responses)
self.model: str = model
self.temperature: float | None = temperature
self.top_p: float | None = top_p
diff --git a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
index 6a8f8c81..7e18f6bc 100644
--- a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
@@ -50,6 +50,7 @@ def __init__(
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
require_all_responses: bool = False,
+ max_retries: Optional[int] = None,
):
super().__init__(
model=model,
@@ -60,6 +61,7 @@ def __init__(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
require_all_responses=require_all_responses,
+ max_retries=max_retries,
)
self.client = instructor.from_litellm(litellm.acompletion)
diff --git a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
index c53454b5..d5cec379 100644
--- a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
@@ -82,6 +82,7 @@ def __init__(
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
require_all_responses: bool = False,
+ max_retries: Optional[int] = None,
):
super().__init__(
model=model,
@@ -92,6 +93,7 @@ def __init__(
max_requests_per_minute=max_requests_per_minute,
max_tokens_per_minute=max_tokens_per_minute,
require_all_responses=require_all_responses,
+ max_retries=max_retries,
)
self.url = url
self.api_key = api_key
diff --git a/tests/simple_online.py b/tests/simple_online.py
index 601c280f..12731eb3 100644
--- a/tests/simple_online.py
+++ b/tests/simple_online.py
@@ -19,6 +19,7 @@ def main(args):
max_requests_per_minute=args.max_requests_per_minute,
max_tokens_per_minute=args.max_tokens_per_minute,
max_retries=args.max_retries,
+ require_all_responses=args.require_all_responses,
)
dataset = prompter(dataset, batch_cancel=args.cancel)
@@ -43,5 +44,11 @@ def main(args):
"--max-tokens-per-minute", type=int, help="Max tokens per minute", default=None
)
parser.add_argument("--max-retries", type=int, help="Max retries", default=None)
+ parser.add_argument(
+ "--require-all-responses",
+ action="store_true",
+ default=None,
+ help="Require all responses",
+ )
args = parser.parse_args()
main(args)
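For completeness, the `max_retries` and `require_all_responses` knobs introduced here are ordinary `LLM` constructor arguments; a brief sketch with illustrative values, not part of this patch:

```python
from bespokelabs.curator import LLM

prompter = LLM(
    prompt_func=lambda row: row["prompt"],
    model_name="gpt-4o-mini",
    max_retries=2,               # retry each failed request up to 2 times
    require_all_responses=True,  # insist on a response for every request
)
```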
From a2adafbce620122a2b9d711b91210136d67acd75 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:12:01 -0800
Subject: [PATCH 14/34] addressing comments
---
src/bespokelabs/curator/{utils.py => file_utilities.py} | 2 +-
src/bespokelabs/curator/llm/llm.py | 2 +-
.../request_processor/base_online_request_processor.py | 2 +-
.../curator/request_processor/base_request_processor.py | 4 ++--
.../request_processor/openai_batch_request_processor.py | 4 ++--
.../request_processor/openai_online_request_processor.py | 2 +-
6 files changed, 8 insertions(+), 8 deletions(-)
rename src/bespokelabs/curator/{utils.py => file_utilities.py} (87%)
diff --git a/src/bespokelabs/curator/utils.py b/src/bespokelabs/curator/file_utilities.py
similarity index 87%
rename from src/bespokelabs/curator/utils.py
rename to src/bespokelabs/curator/file_utilities.py
index 444c324d..6ee606e7 100644
--- a/src/bespokelabs/curator/utils.py
+++ b/src/bespokelabs/curator/file_utilities.py
@@ -7,7 +7,7 @@ def _file_gen(reader):
b = reader(1024 * 1024)
-# instead of requiring counting lines, we can store metadata file that has the number of requests in each file
+# Instead of requiring counting lines, we can store metadata file that has the number of requests in each file
def count_lines(filename):
f = open(filename, "rb")
f_gen = _file_gen(f.raw.read)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index f5589120..08bad431 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -78,7 +78,7 @@ def __init__(
top_p: The top_p to use for the LLM
presence_penalty: The presence_penalty to use for the LLM
frequency_penalty: The frequency_penalty to use for the LLM
- max_retries: The maximum number of retries to use for the LLM
+ max_retries: The maximum number of retries to use for the LLM. If 0, will only try a request once.
require_all_responses: Whether to require all responses
"""
self.prompt_formatter = PromptFormatter(
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index 44eb32c1..09c3c432 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -125,7 +125,7 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
- require_all_responses: bool = False,
+ require_all_responses: bool = None,
max_retries: Optional[int] = None,
):
super().__init__(batch_size=None, require_all_responses=require_all_responses)
diff --git a/src/bespokelabs/curator/request_processor/base_request_processor.py b/src/bespokelabs/curator/request_processor/base_request_processor.py
index e278ab7b..ddc7d268 100644
--- a/src/bespokelabs/curator/request_processor/base_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_request_processor.py
@@ -14,7 +14,7 @@
from datasets.arrow_writer import ArrowWriter
from pydantic import BaseModel, ValidationError
-from bespokelabs.curator.utils import count_lines
+from bespokelabs.curator.file_utilities import count_lines
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.event_loop import run_in_event_loop
from bespokelabs.curator.request_processor.generic_request import GenericRequest
@@ -30,7 +30,7 @@ class BaseRequestProcessor(ABC):
Base class for all request processors.
"""
- def __init__(self, batch_size: Optional[int] = None, require_all_responses: bool = False):
+ def __init__(self, batch_size: Optional[int] = None, require_all_responses: bool = True):
self.batch_size = batch_size
self.require_all_responses = require_all_responses
# Increase the number of open file descriptors to avoid "Too many open files" errors
diff --git a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
index 7588a852..b5b64994 100644
--- a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
@@ -12,7 +12,7 @@
from openai.types import Batch
from tqdm import tqdm
-from bespokelabs.curator.utils import count_lines
+from bespokelabs.curator.file_utilities import count_lines
from bespokelabs.curator.dataset import Dataset
from bespokelabs.curator.llm.prompt_formatter import PromptFormatter
from bespokelabs.curator.request_processor.base_request_processor import (
@@ -48,7 +48,7 @@ def __init__(
url: str = "https://api.openai.com/v1/chat/completions",
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
- require_all_responses: bool = False,
+ require_all_responses: bool = None,
max_retries: Optional[int] = None,
):
if batch_size > MAX_REQUESTS_PER_BATCH:
diff --git a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
index d5cec379..2d21e06d 100644
--- a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
@@ -81,7 +81,7 @@ def __init__(
frequency_penalty: Optional[float] = None,
max_requests_per_minute: Optional[int] = None,
max_tokens_per_minute: Optional[int] = None,
- require_all_responses: bool = False,
+ require_all_responses: bool = None,
max_retries: Optional[int] = None,
):
super().__init__(
From 8c9ae102f165f8c24df3f8c037e8a3b1f71f7e7d Mon Sep 17 00:00:00 2001
From: Mahesh Sathiamoorthy
Date: Fri, 13 Dec 2024 22:14:47 +0000
Subject: [PATCH 15/34] Add a simple llm interface.
---
README.md | 21 ++++++++++++---
examples/simple_poem.py | 13 +++++++++
src/bespokelabs/curator/__init__.py | 1 +
src/bespokelabs/curator/llm/simple_llm.py | 32 +++++++++++++++++++++++
4 files changed, 63 insertions(+), 4 deletions(-)
create mode 100644 examples/simple_poem.py
create mode 100644 src/bespokelabs/curator/llm/simple_llm.py
diff --git a/README.md b/README.md
index b7bf7940..2b4da899 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@
-### Overview
+## Overview
Bespoke Curator makes it very easy to create high-quality synthetic data at scale, which you can use to finetune models or use for structured data extraction at scale.
@@ -38,7 +38,7 @@ Bespoke Curator is an open-source project:
* A Curator Viewer which makes it easy to view the datasets, thus aiding in the dataset creation.
* We will also be releasing high-quality datasets that should move the needle on post-training.
-### Key Features
+## Key Features
 1. **Programmability and Structured Outputs**: Synthetic data generation is a lot more than just using a single prompt -- it involves calling LLMs multiple times and orchestrating control-flow. Curator treats structured outputs as first class citizens and helps you design complex pipelines.
2. **Built-in Performance Optimization**: We often see calling LLMs in loops, or inefficient implementation of multi-threading. We have baked in performance optimizations so that you don't need to worry about those!
@@ -46,20 +46,33 @@ Bespoke Curator is an open-source project:
4. **Native HuggingFace Dataset Integration**: Work directly on HuggingFace Dataset objects throughout your pipeline. Your synthetic data is immediately ready for fine-tuning!
5. **Interactive Curator Viewer**: Improve and iterate on your prompts using our built-in viewer. Inspect LLM requests and responses in real-time, allowing you to iterate and refine your data generation strategy with immediate feedback.
-### Installation
+## Installation
```bash
pip install bespokelabs-curator
```
-### Usage
+## Usage
+### Imports
```python
from bespokelabs import curator
from datasets import Dataset
from pydantic import BaseModel, Field
from typing import List
+```
+### SimpleLLM: A simple interface for calling LLMs
+
+```python
+llm = curator.SimpleLLM(model_name="gpt-4o-mini")
+poem = llm("Write a poem about the bitter lesson in AI and keep it 100 words or less.")
+print(poem)
+```
+
+### LLM: A more complex interface for calling LLMs
+
+```python
# Create a dataset object for the topics you want to create the poems.
topics = Dataset.from_dict({"topic": [
"Urban loneliness in a bustling city",
diff --git a/examples/simple_poem.py b/examples/simple_poem.py
new file mode 100644
index 00000000..474840bf
--- /dev/null
+++ b/examples/simple_poem.py
@@ -0,0 +1,13 @@
+from bespokelabs import curator
+
+# Use GPT-4o-mini for this example.
+llm = curator.SimpleLLM(model_name="gpt-4o-mini")
+
+poem = llm("Write a poem about the bitter lesson in AI and keep it 100 words or less.")
+print(poem)
+
+# Use Claude 3.5 Sonnet for this example.
+llm = curator.SimpleLLM(model_name="claude-3-5-sonnet-20240620", backend="litellm")
+
+poem = llm("Write a sonnet about the bitter lesson in AI and make it visual.")
+print(poem)
diff --git a/src/bespokelabs/curator/__init__.py b/src/bespokelabs/curator/__init__.py
index a9d4cc6e..b02aab46 100644
--- a/src/bespokelabs/curator/__init__.py
+++ b/src/bespokelabs/curator/__init__.py
@@ -1,2 +1,3 @@
from .dataset import Dataset
from .llm.llm import LLM
+from .llm.simple_llm import SimpleLLM
\ No newline at end of file
diff --git a/src/bespokelabs/curator/llm/simple_llm.py b/src/bespokelabs/curator/llm/simple_llm.py
new file mode 100644
index 00000000..9181a614
--- /dev/null
+++ b/src/bespokelabs/curator/llm/simple_llm.py
@@ -0,0 +1,32 @@
+from bespokelabs.curator.llm.llm import LLM
+from datasets import Dataset
+from typing import Union, List
+
+
+class SimpleLLM:
+ """A simpler interface for the LLM class.
+
+ Usage:
+ llm = SimpleLLM(model_name="gpt-4o-mini")
+ llm("Do you know about the bitter lesson?")
+ llm(["What is the capital of France?", "What is the capital of Germany?"])
+ For more complex use cases (e.g. structured outputs and custom prompt functions), see the LLM class.
+ """
+ def __init__(self, model_name: str, backend: str = "openai"):
+ self._model_name = model_name
+ self._backend = backend
+
+ def __call__(self, prompt: Union[str, List[str]]) -> Union[str, List[str]]:
+ prompt_list = [prompt] if isinstance(prompt, str) else prompt
+ dataset: Dataset = Dataset.from_dict({"prompt": prompt_list})
+
+ llm = LLM(
+ prompt_func=lambda row: row['prompt'],
+ model_name=self._model_name,
+ response_format=None,
+ backend=self._backend
+ )
+ response = llm(dataset)
+ if isinstance(prompt, str):
+ return response["response"][0]
+ return response["response"]
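As the docstring above describes, `SimpleLLM` accepts either a single prompt string or a list of prompts; a short usage sketch (the model name is illustrative):

```python
from bespokelabs import curator

llm = curator.SimpleLLM(model_name="gpt-4o-mini")

# A single string returns a single response string.
answer = llm("What is the capital of France?")

# A list of strings returns a list of response strings, one per prompt.
answers = llm(["What is the capital of France?", "What is the capital of Germany?"])
print(answer, answers)
```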
From 7674a6d2c9ff4c81e8d4899c6609d696fe6dc1b8 Mon Sep 17 00:00:00 2001
From: Mahesh Sathiamoorthy
Date: Fri, 13 Dec 2024 22:21:12 +0000
Subject: [PATCH 16/34] Update README.
---
README.md | 30 +++++++++++++++++++++++-------
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index 2b4da899..fe3327d2 100644
--- a/README.md
+++ b/README.md
@@ -61,33 +61,49 @@ from datasets import Dataset
from pydantic import BaseModel, Field
from typing import List
```
-### SimpleLLM: A simple interface for calling LLMs
+### `SimpleLLM`: A simple interface for calling LLMs
```python
llm = curator.SimpleLLM(model_name="gpt-4o-mini")
-
poem = llm("Write a poem about the bitter lesson in AI and keep it 100 words or less.")
print(poem)
```
-### LLM: A more complex interface for calling LLMs
+#### Use LiteLLM backend for calling other models
+You can use the [LiteLLM](https://docs.litellm.ai/docs/providers) backend for calling other models.
+
+```python
+llm = curator.SimpleLLM(model_name="claude-3-5-sonnet-20240620", backend="litellm")
+
+poem = llm("Write a sonnet about the bitter lesson in AI and make it visual.")
+print(poem)
+```
+
+### Visualize in Curator Viewer
+Run `curator-viewer` to see the dataset in the viewer.
+
+### `LLM`: A more powerful interface for calling LLMs
+
+Create a dataset object for the topics you want to create the poems.
```python
-# Create a dataset object for the topics you want to create the poems.
topics = Dataset.from_dict({"topic": [
"Urban loneliness in a bustling city",
"Beauty of Bespoke Labs's Curator library"
]})
+```
-# Define a class to encapsulate a list of poems.
+Define a class to encapsulate a list of poems.
+```python
class Poem(BaseModel):
poem: str = Field(description="A poem.")
class Poems(BaseModel):
poems_list: List[Poem] = Field(description="A list of poems.")
+```
-
-# We define an `LLM` object that generates poems which gets applied to the topics dataset.
+We define an `LLM` object that generates poems, which is then applied to the topics dataset.
+```python
poet = curator.LLM(
# `prompt_func` takes a row of the dataset as input.
# `row` is a dictionary with a single key 'topic' in this case.
From 286be1e2c3f8be54cf11ebdd2155f49b59654d76 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:33:09 -0800
Subject: [PATCH 17/34] rate limit properties
---
.../base_online_request_processor.py | 72 +++++++++----------
.../base_request_processor.py | 10 ---
.../litellm_online_request_processor.py | 9 ++-
.../openai_batch_request_processor.py | 42 -----------
.../openai_online_request_processor.py | 9 ++-
5 files changed, 45 insertions(+), 97 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index 09c3c432..02f4a79f 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -135,54 +135,48 @@ def __init__(
self.presence_penalty: float | None = presence_penalty
self.frequency_penalty: float | None = frequency_penalty
self.prompt_formatter: Optional[PromptFormatter] = None
- self.max_requests_per_minute: Optional[int] = max_requests_per_minute
- self.max_tokens_per_minute: Optional[int] = max_tokens_per_minute
+ self.manual_max_requests_per_minute: Optional[int] = max_requests_per_minute
+ self.manual_max_tokens_per_minute: Optional[int] = max_tokens_per_minute
if max_retries is None:
self.max_retries = DEFAULT_MAX_RETRIES
else:
self.max_retries = max_retries
- def get_rate_limit(self, name, header_value):
- """Uses manual values if set, otherwise uses headers if available, and if not available uses defaults."""
- if name == "max_requests_per_minute":
- manual_value = self.max_requests_per_minute
- default_value = DEFAULT_MAX_REQUESTS_PER_MINUTE
- elif name == "max_tokens_per_minute":
- manual_value = self.max_tokens_per_minute
- default_value = DEFAULT_MAX_TOKENS_PER_MINUTE
- else:
- raise ValueError(f"Invalid rate limit name: {name}")
-
- if manual_value is not None:
- logger.info(f"Manually set {name} to {manual_value}")
- return manual_value
- elif header_value != 0:
- logger.info(f"Automatically set {name} to {header_value}")
- return header_value
+ @property
+ def max_requests_per_minute(self) -> int:
+ if self.manual_max_requests_per_minute:
+ logger.info(
+ f"Manually set max_requests_per_minute to {self.manual_max_requests_per_minute}"
+ )
+ return self.manual_max_requests_per_minute
+ elif self.header_based_max_requests_per_minute:
+ logger.info(
+ f"Automatically set max_requests_per_minute to {self.header_based_max_requests_per_minute}"
+ )
+ return self.header_based_max_requests_per_minute
else:
logger.warning(
- f"No manual {name} set, and headers based detection failed, using default value of {default_value}"
+ f"No manual max_requests_per_minute set, and headers based detection failed, using default value of {DEFAULT_MAX_REQUESTS_PER_MINUTE}"
)
- return default_value
-
- def get_rate_limits(self) -> dict:
- """Get rate limits for the API. Returns a dictionary with max_requests_per_minute and max_tokens_per_minute"""
-
- # Get values from headers
- header_based_rate_limits = self.get_header_based_rate_limits()
- header_tpm = header_based_rate_limits["max_tokens_per_minute"]
- header_rpm = header_based_rate_limits["max_requests_per_minute"]
-
- # Determine final rate limit
- tpm = self.get_rate_limit("max_tokens_per_minute", header_tpm)
- rpm = self.get_rate_limit("max_requests_per_minute", header_rpm)
-
- return {"max_requests_per_minute": rpm, "max_tokens_per_minute": tpm}
+ return DEFAULT_MAX_REQUESTS_PER_MINUTE
- @abstractmethod
- def get_header_based_rate_limits(self) -> dict:
- """Get rate limits for the API from headers. Returns a dictionary with max_requests_per_minute and max_tokens_per_minute"""
- pass
+ @property
+ def max_tokens_per_minute(self) -> int:
+ if self.manual_max_tokens_per_minute:
+ logger.info(
+ f"Manually set max_tokens_per_minute to {self.manual_max_tokens_per_minute}"
+ )
+ return self.manual_max_tokens_per_minute
+ elif self.header_based_max_tokens_per_minute:
+ logger.info(
+ f"Automatically set max_tokens_per_minute to {self.header_based_max_tokens_per_minute}"
+ )
+ return self.header_based_max_tokens_per_minute
+ else:
+ logger.warning(
+ f"No manual max_tokens_per_minute set, and headers based detection failed, using default value of {DEFAULT_MAX_TOKENS_PER_MINUTE}"
+ )
+ return DEFAULT_MAX_TOKENS_PER_MINUTE
@abstractmethod
def estimate_total_tokens(self, messages: list) -> int:
diff --git a/src/bespokelabs/curator/request_processor/base_request_processor.py b/src/bespokelabs/curator/request_processor/base_request_processor.py
index ddc7d268..b9fb09f4 100644
--- a/src/bespokelabs/curator/request_processor/base_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_request_processor.py
@@ -41,16 +41,6 @@ def __init__(self, batch_size: Optional[int] = None, require_all_responses: bool
)
resource.setrlimit(resource.RLIMIT_NOFILE, (desired_limit, hard))
- @abstractmethod
- def get_rate_limits(self) -> dict:
- """
- Returns the rate limits for the API.
-
- Returns:
- dict: A dictionary containing the rate limit information.
- """
- pass
-
@abstractmethod
def create_api_specific_request(self, generic_request: GenericRequest) -> dict:
"""
diff --git a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
index 7e18f6bc..4088a91c 100644
--- a/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/litellm_online_request_processor.py
@@ -64,6 +64,9 @@ def __init__(
max_retries=max_retries,
)
self.client = instructor.from_litellm(litellm.acompletion)
+ self.header_based_max_requests_per_minute, self.header_based_max_tokens_per_minute = (
+ self.get_header_based_rate_limits()
+ )
def check_structured_output_support(self):
"""Verify if the model supports structured output via instructor.
@@ -158,11 +161,11 @@ def test_call(self):
logger.info(f"Test call headers: {headers}")
return headers
- def get_header_based_rate_limits(self) -> dict:
+ def get_header_based_rate_limits(self) -> tuple[int, int]:
"""Retrieve rate limits from the LLM provider via LiteLLM.
Returns:
- dict: Contains 'max_requests_per_minute' and 'max_tokens_per_minute'
+ tuple[int, int]: Contains 'max_requests_per_minute' and 'max_tokens_per_minute'
Note:
- Makes a test request to get rate limit information from response headers.
@@ -174,7 +177,7 @@ def get_header_based_rate_limits(self) -> dict:
rpm = int(headers.get("x-ratelimit-limit-requests", 0))
tpm = int(headers.get("x-ratelimit-limit-tokens", 0))
- return {"max_requests_per_minute": rpm, "max_tokens_per_minute": tpm}
+ return rpm, tpm
def create_api_specific_request(self, generic_request: GenericRequest) -> dict:
"""Convert a generic request into a LiteLLM-compatible format.
diff --git a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
index b5b64994..9dcf97d4 100644
--- a/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_batch_request_processor.py
@@ -72,48 +72,6 @@ def __init__(
else:
self.max_retries = max_retries
- def get_rate_limits(self) -> dict:
- """
- Function to get rate limits for a given annotator. Not available via response headers, so
- the following is based on tier 5 limits on Nov 6th, 2024.
-
- These rate limits vary per model
- and are determined by your organization's usage tier. View the following:
- https://platform.openai.com/docs/guides/rate-limits/usage-tiers
- https://platform.openai.com/settings/organization/limits
-
- Args:
- model (str): The model for which to get the rate limits.
- request_url (str): The request URL for which to get the rate limits.
-
- Returns:
- dict: A dictionary containing max_tokens_per_day
- """
- model_tpd = {
- "gpt-3.5-turbo": 5_000_000_000,
- "gpt-3.5-turbo-0125": 5_000_000_000,
- "gpt-3.5-turbo-1106": 5_000_000_000,
- "gpt-3.5-turbo-16k": 5_000_000_000,
- "gpt-3.5-turbo-instruct": 200_000,
- "gpt-3.5-turbo-instruct-0914": 200_000,
- "gpt-4": 150_000_000,
- "gpt-4-0613": 150_000_000,
- "gpt-4-turbo": 300_000_000,
- "gpt-4o": 10_000_000_000,
- "gpt-4o-mini": 15_000_000_000,
- }
-
- if self.model not in model_tpd:
- tpd = 1_000_000_000
- else:
- tpd = model_tpd[self.model]
-
- logger.info(f"Automatically set max_tokens_per_day to {tpd}, model: {self.model} ")
-
- rate_limits = {"max_tokens_per_day": tpd}
-
- return rate_limits
-
def create_api_specific_request(self, generic_request: GenericRequest) -> dict:
"""
Creates an API-specific request body from a generic request body.
diff --git a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
index 2d21e06d..c9c1e34f 100644
--- a/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/openai_online_request_processor.py
@@ -98,12 +98,15 @@ def __init__(
self.url = url
self.api_key = api_key
self.token_encoding = tiktoken.get_encoding(get_token_encoding_name(model))
+ self.header_based_max_requests_per_minute, self.header_based_max_tokens_per_minute = (
+ self.get_header_based_rate_limits()
+ )
- def get_header_based_rate_limits(self) -> dict:
+ def get_header_based_rate_limits(self) -> tuple[int, int]:
"""Get rate limits from OpenAI API headers.
Returns:
- dict: Contains 'max_requests_per_minute' and 'max_tokens_per_minute'
+ tuple[int, int]: Contains 'max_requests_per_minute' and 'max_tokens_per_minute'
Note:
- Makes a dummy request to get actual rate limits
@@ -121,7 +124,7 @@ def get_header_based_rate_limits(self) -> dict:
rpm = int(response.headers.get("x-ratelimit-limit-requests", 0))
tpm = int(response.headers.get("x-ratelimit-limit-tokens", 0))
- return {"max_requests_per_minute": rpm, "max_tokens_per_minute": tpm}
+ return rpm, tpm
def estimate_output_tokens(self) -> int:
"""Estimate number of tokens in the response.
From 585a495a7a20b31bf96015a93ecb33ad471694fc Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:34:06 -0800
Subject: [PATCH 18/34] small fix
---
.../request_processor/base_online_request_processor.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/src/bespokelabs/curator/request_processor/base_online_request_processor.py b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
index 02f4a79f..5e24228f 100644
--- a/src/bespokelabs/curator/request_processor/base_online_request_processor.py
+++ b/src/bespokelabs/curator/request_processor/base_online_request_processor.py
@@ -241,9 +241,8 @@ async def process_requests_from_file(
status_tracker = StatusTracker()
# Get rate limits
- rate_limits = self.get_rate_limits()
- status_tracker.max_requests_per_minute = rate_limits["max_requests_per_minute"]
- status_tracker.max_tokens_per_minute = rate_limits["max_tokens_per_minute"]
+ status_tracker.max_requests_per_minute = self.max_requests_per_minute
+ status_tracker.max_tokens_per_minute = self.max_tokens_per_minute
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(
From f41aa91a711d24b7e1a2fde4ce9e4d40acd8fef3 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:39:52 -0800
Subject: [PATCH 19/34] update simple online
---
tests/simple_online.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/simple_online.py b/tests/simple_online.py
index 12731eb3..30162241 100644
--- a/tests/simple_online.py
+++ b/tests/simple_online.py
@@ -19,7 +19,7 @@ def main(args):
max_requests_per_minute=args.max_requests_per_minute,
max_tokens_per_minute=args.max_tokens_per_minute,
max_retries=args.max_retries,
- require_all_responses=args.require_all_responses,
+ require_all_responses=not args.not_require_all_responses,
)
dataset = prompter(dataset, batch_cancel=args.cancel)
@@ -45,9 +45,9 @@ def main(args):
)
parser.add_argument("--max-retries", type=int, help="Max retries", default=None)
parser.add_argument(
- "--require-all-responses",
+ "--not-require-all-responses",
action="store_true",
- default=None,
+ default=False,
help="Require all responses",
)
args = parser.parse_args()
From 82817e50bc0022051fff4b649c85ea741aeda166 Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:41:25 -0800
Subject: [PATCH 20/34] update simple online
---
tests/simple_online.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/simple_online.py b/tests/simple_online.py
index 30162241..4d5f90df 100644
--- a/tests/simple_online.py
+++ b/tests/simple_online.py
@@ -19,7 +19,7 @@ def main(args):
max_requests_per_minute=args.max_requests_per_minute,
max_tokens_per_minute=args.max_tokens_per_minute,
max_retries=args.max_retries,
- require_all_responses=not args.not_require_all_responses,
+ require_all_responses=not args.partial_responses,
)
dataset = prompter(dataset, batch_cancel=args.cancel)
@@ -45,7 +45,7 @@ def main(args):
)
parser.add_argument("--max-retries", type=int, help="Max retries", default=None)
parser.add_argument(
- "--not-require-all-responses",
+ "--partial-responses",
action="store_true",
default=False,
help="Require all responses",
From 19089cb2e9c1cbbba876ff4629884c025137057c Mon Sep 17 00:00:00 2001
From: Ryan Marten
Date: Fri, 13 Dec 2024 14:43:21 -0800
Subject: [PATCH 21/34] fix typing
---
src/bespokelabs/curator/llm/llm.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/bespokelabs/curator/llm/llm.py b/src/bespokelabs/curator/llm/llm.py
index 08bad431..589b3e4f 100644
--- a/src/bespokelabs/curator/llm/llm.py
+++ b/src/bespokelabs/curator/llm/llm.py
@@ -44,8 +44,8 @@ def __init__(
parse_func: Optional[
Callable[
[
- Union[Dict[str, Any], BaseModel],
- Union[Dict[str, Any], BaseModel],
+ _DictOrBaseModel,
+ _DictOrBaseModel,
],
T,
]
From 3ef189a21d33155bb1626fac3a583180cd3f15e0 Mon Sep 17 00:00:00 2001
From: Maheswaran Sathiamoorthy
Date: Fri, 13 Dec 2024 15:06:11 -0800
Subject: [PATCH 22/34] Add some screenshots from the curator-viewer.
---
.DS_Store | Bin 0 -> 6148 bytes
docs/.DS_Store | Bin 0 -> 6148 bytes
docs/Curator-Dataset.png | Bin 0 -> 279522 bytes
docs/Curator-Responses.png | Bin 0 -> 422239 bytes
docs/Curator-Runs.png | Bin 0 -> 442981 bytes
5 files changed, 0 insertions(+), 0 deletions(-)
create mode 100644 .DS_Store
create mode 100644 docs/.DS_Store
create mode 100644 docs/Curator-Dataset.png
create mode 100644 docs/Curator-Responses.png
create mode 100644 docs/Curator-Runs.png
diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..bb875642a4ca21df3c646358f901a43b482f35d4
GIT binary patch
[binary literal omitted: 6148 bytes]
diff --git a/docs/.DS_Store b/docs/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..432e199d3f7d83bcf2ebcce84b50fc894571d1be
GIT binary patch
[binary literal omitted: 6148 bytes]
diff --git a/docs/Curator-Dataset.png b/docs/Curator-Dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..71a342461b7422a57f1d18652da03ffa3cf391f0
GIT binary patch
[binary literal omitted: 279522 bytes, PNG screenshot docs/Curator-Dataset.png]
zILn6L50iOa~NwXY?|1smQ_!%O$|A}h5k>E~Xv
zaZWn@L&BY9Z?bGxCnB{(gmtJz(8NxM$~cmlB>1}mAR)rtDCp`IlR{kSMV0CRo4}s+
z*X1ZvF58G&sisN{J=W$12hd~3egdhFmSoc-qMVBr5wEEP3rhdR*+pyXr_g`*9&L(#
zNC;o(wZTHwHM1d+u(X}QyzqR6@Vw7dm@2WX+4&~5YoIXK`m`v$n|*hD%YX+AGuk=%
zjj}%5bg0c72oVgE$MXU!ydqE}r7v^?5
z$y=8qP*)$uk#*T7DK2NgRtpPQMyx7qZu=!K?7!CLF*8ci1yslE86
zFzoogQEf;(iOlX2L1n)m+K~g&fr0;b;Zz}adEMBHcOaJMdn@FRprKWM4Mvfo1BmcE
zzN32iMq0Pw{`QedjWecL9gWw&ae0dVwH`QPh%#{J7lY^OS{L>BGC_)@v&AIZg;5RfSNsl-peN
zxDXEHekLFm!RiTgc|s>&eSgMT8DTi0c06EKtRCxtkgxhxWZ=opn{c`^5iBaDIB-Gy
zQc>6|%U5CUPZztHLcK+Ht2Vkxvl?s{{ISKu3b5iSwGmN~#qClRk5NIi_mqdN-b_KP
zJUFm%^gYC}gtu%{6j2Yyu~3{~H?iZuZ1AsFhhG_J^Q_;=zLU4)5r~>1Mi=_)Daf#pJ|t~jk(3g9%8V?1Z#^i__~i>Q
zRct#6rngwKr1Pa_EN$PjZ37eOx#B{$Sf%ZIp#IQ87F|C2%!wXLgTflj=`IQcY7gCG
z#eo2rO*L{$4A-wn-z&a)^J|Ba4QH1aU0btkl@*WIf`&^vna|f1z0;QP`Op6a5bOG$
z=33lqP9MLs$pawKc!(wY+xh7h#bBI?Y;05IybzMnI9Ap`;`R5Txt3-=1^6C)Pcm!{>YNTKBGX&p$3j
z7~bz!?0EKmo^384;yIOrd<4@+f5#am`d{8@D-&(hi1%sfA3?srpsIg)q>@ha{SX!bNJjh{8TAN_^7
z*n>mDl#)K%R$C)Yp+$>X-S0Zv+zxzIWqdkrpIgaWbxTS#`{tpm48p?Y*GEDh%+!sr
zr7haIe_upy)Uf)}UT-g6nqvKrcZtwX6+yj~tz(5gMAB%Jim&LSCfKkYv-E$h=#!e=
zlVRh()KuK2P4|m1Atjdm@kzwG9sF#{@27i9-UilS-lVBNgI!GED;)*ipOfp=E^s!fq3g{+OEac
zetU#XY=xyAf~(n*Cv9^!MoVfT_3nSbbJf+SP%a9xeHbM@zP$L%=m2Vrk`2v!VSRQZOcjRzOd=`WMo`(nZ7sZYrIb#(*a4LexA2T8i
z*xReNo9lu8wj(Hp2??R1yL}6du!YLOCWLuZv=`)=f*nUBe)d*-pFru=`O7BgE_dW5G(#B5
z*|!Yu`tir5(BRYZjmF~XQT;;!H3Ww6knsm!(`c)7m!oa&BsDv&K<`gU7){5=pA0vI
zIV?;Xf=rY>aZDB;_G7YNy*G;q6h0rB;gh}JF+A|+06S#5oxwSFvuZaAL$*6AEzpjn
z#U?j#os*xo_wxc;#ET1Q>tBy_lKK(nRgLhb^)s5-Sf_sKCVjD87@f&&qk&^_
z)`o1KP8zdoFdoR(_XrPw2RD1ZoEB5QHvTzV*xzU3Yj+>`>0|TdujANX}vs$6*
zvI2I(Uz6=1)1z7{($LMD$=IC8{S>TjzrLJyWjb`SX{5iUbz-J(6CZ2Seq
zmAun|du0p6GfGdzsa;ZHZV5d!AHIJY8#yh(Cnp{VU}Yk?M3ITAb?~eFY`rHpGTer#
zbSTJ~t**?b>r3WH+44%xznHVF{E6_i%9)oN(Dqt->KOAwlSJX~8)-_*=ej(Kvghh=
zLh+HT$Qndrt0H6$+E@Guco@jWOtrfGLS=hU)TAbIr-}pX^o%H
zR7X*rt1Hn=ha00)iB5Jc7|jUts{xEByiIFJgr@tY9L+Yq?7U~2bjG-87hKefF%M?j
z(4Mmk3-R<0^DLEtgKGCXNwod~bFIm#a5w8q{}IlWy^q4)eXUuC@99lii_-$lYd?|r
zZ1SlTkT7mg%=DRMQb#$meY$z6zIahrK`vs_)7B4lZOj#wTcY!DKR?%;4eoX;_1A`j
zR#MUPj1yUx>y>1m$hQ0J;ZBc*)ZQ<#nh-fi7Hv%
ziz`uQYFWrH9{)1ee(jJ&lr-f3A!$|pj)U6rp|1NTMkvICR9|#Ha?0hiy0a&Qs}gBS
zXWO}QqZL}tvrll07)1K_6%IcwK!Z^jetT2fBEWn+1lG!yPU1VC*yUh
z3$pu6qGXE)k^^qpEA{=3S9=;R%PwD7I1^=@U`-SL-Bv!-;*H*St9?TUJafv9&ihqe
z2?<@}(3GKwwEddK(WTMrYx~PwyyxwuTlhzVcj~_oC!hl==d<$XOTx1a%?>rjjCz=Y
zLo-U!TfchlWV_|))E&R}be|LhYpmBO`D)bYn=8EjO;{*jSZ4E4U~`Cd%(R~KxH6)}
z4G)vmcQmfT#|jhY8Nw;Jn(J<3KXY{+KeLJVn5j44ux8bBK~4ly!JzxbX`wsHTd%k?
z9__1UNQ-KY(*z)Tu(E*OR_fVMZ@G!dPre*^9@dF06Qo#iM0i4weD4Csh4ZmsS+A-g
z72hK{#yY3!7Y28>1|j8neC^v#+_|+#>$;pHZT+4yRx<5Sh-aEAS|vmO8Ov%We&ag%Lu>huH*uK`*Kn>5
zInrNyLJh7s%L@v%k_%h8{QBvSF5G5X;fqmvUdhWOZS*;n)cVRhuV-5RpFEW^YLCv0
zt*pLnL5q#ngz$&m^IUE?6KKwC9#UTwYF>K!C?F))hUBCX!jiGo{pX&Z~<-2Wpd^3(^sGgksT9IsJpcl^OOnfsh
zjg6<#`q+~==I80DFPf`l(>_Eg`g-}U?qwq?7G6Brp3v_UGqC^t=1Fo<>xxq7o8-=<
zIsIB*pa^Sy+{-WDEub((QT&jCxfX}}epvn1D^6m1``^ra$eQbUup6DF=R
z>m;3Avu+K{RnrtFKM!pY5Xh=tj>IbqnN2RsRuI%Uw^vh`3Z`8CR)FpNSjLO$Us#X%
z16raVzh`P@c`=1GpGHnR#ZjAWpFMOkg4OOO=o?Sno+XMpHWXcWs6o
zr)maA7~l??B%yDPlpc4y@=H_!XdYccLoHKXYqHO*A7i)hy%w}u48kT*sdc=4%U`}1~?$SHi0mmVGRgRYBkubw*Sde-7WOQ$lv7bEo@+hd{?U{|=X9&dWSYB(=@>!_(;
zr>j%on0)9PjLA>|vwOgtpicG$j|-!rY9XJ~uf1YD!aiEcvv}0UKx@n>j;ZLSzsNvX
zQtk5XA8(zoX3rciPcSp#??ovxQjBkXDW|XR_pOHtY_C#dFN*H@ozUDU%i%JufEV*Z
z8(iT@_dG8J_)O(ocy>o5=M<{T-`~O*8W02cnF7a#SN%+vTGOo00_jTi$ezIAt{%yi
zo3fs|u4|<>j4%9Zf+y0oMBi*vBra&s*Hqh`5m#{lnr{&g{ak&e*a(~WF+n2}(CT!5
z^?DW|HXP@RdjwZTx-hkj$N6gh}!87~oZEm`UrLFxKwpMvQ
ztH#j1onEQ%!^u*2*n%`ua;4^d)on+gH=~bYIINo2IfR<%-7*q%9{t?HWGssMJFrfZ
z;Kj?2fq)Njl+I~(*Q!1`OK^!NwX01$PRi}5rYX_*u%*$s&S@oGkyR~T9K(x_p0^v$3a`q>Ljek#t;pbQofz~^4dTb
zchLlYi&%>|LRjo?0x}w1XrtVGXh%_mCZdB|cDgQhV+K1_5nXA)zcNv5lF(dzV?W3&
zSbS7<-UyVf|9ULLVWer^MptsVi%5h(plWC-EbD?=JPK85S
zh9W|Mf6*DZVQMs?0cYG!dSS(hd8%&$Zw`)JCb`E|P-n&7e{ZfQXfE!kSs&b3BdvXr
z2a>EaB!sa_HX9DDMnSpBl!UuOFT{T&R*BbDU6uq0sjX(=WPmxjd4WIm(F=hl4I#5L^igvL+Z3pdCORJm1dQVz7?Ey$SYP^^(uW{X!txnrUz5
z)*Q}oO+hE$NoA}k{
zGWxPw()~KmwzWuBBSTMa
z^!fg5(_4Z-(NqaIAqI}%7i*-=m&)TGS1{{OcZs!qzQ)}@MT$#5sMFmBI3u&7co$>1oj?DK&8=
zvZ+lgMc89|v&p(cn!L7XFUs98_)y-+YKPot9@)beF+Le6vZKM0ebR6?`5uc>cT
z$&2@Vt-i|VUz)}~drNT@P<{+5d#qHDgo9F~mNIHYCTMMfaA|_-0DIzu$9|*j12V@R
zjYJt_9C@AimDB(G?KY=YTbnZfVy5ZHEA$;4ueDlF_&I;NwD+jKL76fkt#_^d1T(w*
zjq|}nTsQj8qY7HZrfG4$nc=9P$4OAFDY@GRlteUTNwcGL&g{srg-xQ;V;AxmLMAUz
zFDGdkL<>q^7&_@IF}9w-guk#Qs1(&~C`tXjK;_6CR^YGv{6
zojb6QVTqxE??
zNL0b*K^hZjrB?Uf`ni@n{G3U+?8!3=FT#l$AiL)_FprYfS)UYJ>j`xG
z5VSbhT4+;+FCIQHcC;XW3#6mcJ@FNm)E+Lfh5o6YHaaN6RLQYUR@;Q~X>4aeSR*?N
z#rtTxueZ*un{rr*>krBM6yNP-ex`DxE3xnap#iz+Xp}C
zunx|gx;@C$Y)i9n`3a9s75b!c##{TM*kAUY4vmL~3AIPc2(g!c+^)e-x-Le|dtBZV
zpn6}UOyXc>%H!7H0Q>Yve2^zz%xO^d-|O;uqMisG^E~@h0(Ct8Eqmqx`lAhFZ`>&^
z-3{}xU5d&aEq$AwOS;dGqFe9In>d?~3w$?%9-+L=j|Xrck{-|r-HRJ>bBfw=_H3l(
z^E+BpY2j*l_++yuUM4K5QRq;RMeLCH8`(FFd2cPWCbHkAqb)u*%-8<|y^>nz>u9In
zn$Cf1BK;>qTKlLNK8|rB88TfZ(@i!OSfL-;4X5d7?tKg!G
zef}rw=AZ3-{PaoD6)k3q+*hI56<9f}$4TJB_u>w?mJ=;3o6EaR<(g`ATz}T~wrfLC
z1&w~vu@?W8A18j|&JSVUvY3F#2`E?CkjbO(`7bGD<+#?~hIrp8Jfdt?b|{20{oQhA
z2@939YZ39l`yNH1Ws+rYn=@8xWpi$s1gB5aNS$rCa_=FYV%64IO(r|6N&+I|VBr_Q
zy_38t*457IIj-#h455Kv2h`PPv+GZOjM=Y#|A@d?QZDkAXX0x4dS~Wy;O3+j#Qys1uorg_pSo3~k$=Cs5QXBy6=%)N^yd0Kg_MvGbUQ?dlF8b|v5TY=
zjfjJYyi!wPW*^Wmaj_MAY#?kU{&+e*(pCr{qI*z78b5w%yIzuE<)vME!h6siQ5alV
zv+{&3Whs9V?Q-eMUL3+kRtYKP$_lxCS39UuH`mmEQa~b;@?OH$k^N^-y|aZ3v4xdTZ3hjNHQP_0?ywZTI=kII60(n{8R4_v)maPO$9UieCLg_B
z{Y>k8%&X6c%D^4Hp#{`|n!DD{z}x1oxHNOAYM|zjv_wm+w8y=bhy2O6o0m0Kp$xorcaV&fIb
ztBucsY}2`tW2SMM$eoQU=>H>xREQLXi@s>o^k~0*m@t_r&b^5%-KU&O4Fb+Ylt!A)u@g-Gir@2=>wAIOyhEMd9xOG#Gg~6^Wz>
zUc70B%BeMA>u8NFB-NOMq>}&SSV2uiSFbyd}~1rlg}TW46yUX
zXRiEYY9$*0@s}F*N~bj-2MQ?{CNoVO@KG?oq5~>F?Q|Upy%E39gmzyTd&46*6BlIE
zv~`XfRPV$1V2Ups>~}R#KN5;5Lf;b;(K(V6eBc;Qg!qO@`Y*BB^&;@{udLW`nyVo2
zrKR;VAJikBA%sSAAbj6wa1^4tfxcaLYtK8=IE2jq0)31@WGA@+Z(5n6F(ll2#&Zzb
zBu^aIEh9xAWsB=SP
zBNOg~9;Z|7owL?u{Al7AN~FgZ?&y_#7REn=Mr6N-??&9i8ndJw&0H8)xChDpP*jsW
zyb>zjp_SVFPNReo!?Z`@jcP@{nn%OZhOb_{nWtzdc01m{ongX4dBU=VN_6NkKp6TE&N5Lw~kxI{#ao=q0=E^
zg|AsM0_DRVx*+gFBnHNR&^_~y3*~u$U$@YzI#ZuMOMztNrEnFy(;T8DN&*s*ryOeI
z4-(WWdo=#}eg17Eqgzb{GGgRQz3D9KPY1VKfYvqMWb)60QC-d>6p<9=8Gfn7oAxGoSXzuEURf6zH
z4HUy!p+jDEAGJ@}Bq*o8*;BO${m#$wyB~P2Un0Q&9BVOvdA0vM7zKGafoV{3i
z(k?9ooUgLO_n=!66d}l8O~pLNxSX05+JECo7B4!c8&vm@jX$3|=IVD0R!CAy<nE7n|O5yOFP%qaaMij@=U+&H6wEGNX@y859wjdOZ#wGJf#!cUSJiSl+i%
z3Z=Ifkd=09?bvT18%4EC3k9|(>>KH`!>rCf-p~Jdb@)3{);|0llth^m<-;$gXlt#$
zJoWo_AfncX`E*JjTi>(4QRwP_sY^gh>Vlf3PKZ88O1o(|b6|JBfRyq!|EQQvM{?uo
zD`B#_l08+{q@##TWSFqU{V<4;9_yT)sAz-RvQbJJRl55#_SYjTGwLA09R^7V8@EQ~t=$ghG8$_N&IP^1uTZ!rPgP5gAOiWRp!T?C
zyxT#s)2}o3j0kF{Je|gw_g(~wa`)*&J$WXosSnDW2?g>S^~4s^-JR;!2Bgzn>A4j<
z+jzCi2Nq`C8TlUDYaI0%ubXA|knCHaKd)V!7tE{KWhB`g%5Mo*zR@FzdFl<+2{W`y
z`m*$DgdBQ3?|C~t0Xe>&_V?E;hAKTV0(9d=!bt|18o6~)0a3w&
zz+@V5W5^t)Bk`+Rd5_-oJ55A)71a-nDshGN_V9
z2Xm|QO;}pwQV^y%%|V8BmV1)vk|}ED(2a8gAoO!!x;Hn8>f$=pX{+gTIkBxVl6dD|
zKlCgswj8WpYaJp?U9~lFopNTE57PyyV-ItHmG9g6SEh&RzS`X#rOkL!U9hh9y(yTx
zq|#zI#{VN?!D~6))s@@J?*;JaUCCbRTW;7Bpj<*Rwe0eNDJC@in1VyZIq~VLCcBL?K3^
zH&`Pu===w7;LLszv-@$_NthJ(hQ}n60M%I{^x#;=Y@g$_(j<)7LUlt8hv}DieqUxt
z_^o&F?tIS`Ps&Y@Kv%<}IOz*{+xOZEAbadXMqAjQrRRl)FB@m(d?Lg69Ad@5QkCu1
zE^$atab2J4Otk<(&t*XV@xa1+!@rym`oI((%CUTl^5b4le6{jcg~9mMp@R6pHZ5ub
zv`>G6)`ggcs*Zd!bvz7t_+G&IIQ{40PT)HlAOoDfv3H&N7q|3p{#nbFh3OaD09E@m
z(@rd(on{(+P2-U(dpu9mD)*>eRyL}FN{#9>W5pfRIX)|Kmt}Lr1br3XB;p1Uwq|aX
z7dZ#BpF4J}65wZi{j1
zl1pFRn5!UyfT-i7$+_euUkkWM`NRTz>lPkl$G`FoKX~n_d9E0}rrI6vQCtV-guFZ9
zcI73ru9o#o?d`P*R?R}I_gm}JnTc$Pvn61u`I6IU*PucA5m;>B)6I|i;W`ju!iS(Y
znj4iobaX1;wDUGYT7a0$EegW=G+)Ed$-v8wvsxQaB39sMk!0P%gPv$Q)bGphly^Km
zNI1T{DsL}HcBGIO`!jY*ss}M#{~?5e?TZYp{BXlptKA%Cz7R?=Q6)FPR_c?cs%O&J
zyegu|Y21(Tb_2q<#$dVInSQ#rTQJ_^G;S?8h(oCW4#g+hx1TdvphVqOV4*1vT(5<9
zf@*Pj8o3Xlh0&G%Im+r1mNQs8bu=6pTPbl3h
zM4grhv`!pU2%d?Yi{AOwp#;l|$%O+eV!~Q63@zMwRD6Y%jKbzW)DteDMej|lf_e^O
zeco)GzXEcz6MePxI_6NOLmRTPjQp%;b;Q#iSLpgx>K0Vnsjfq$7mRTr_H{m|W;$M^
z<4%XE5x1CG{g(Z}dGeeKFqzHaNY36kCRILuDKSFC5~E#gm+eI(cHY`H%M~c~z3_4)
zPdlrcUW8|jyf|_u?N$)GlQUi2D(b~5lp5_w7C#-6f|qcr8?1w{RDY9WU~ls-)*rs*
z$-Z~|UvUnpKTcAE{q7;`L!DLWt%i#;Hkxkv)l-vfr<$bd6US>F<|5NhF7UO{{!Ai#
z)}gMUhc1sER^5{&>(3}ND80Q}gN~q9i@$u#-p!!SFI(86FW%|dDrPS^O&)hf;c0iH
zN4GrDWPP5Fzr+!T+`r`A_@60eAm$&g4G{$2xMQu$^PkV<@1;9M;&yBa=J+w+ui=5ioaJ<2
zbiu_69It6uwA*s;avhW9^?<^OSdRfkP`N3zfbI)xG#WTW#KNU;p5%+ER}-*&ST$u&
zJnU94yl_-w!sw*9>#7w!yCNY=zm`&6<`=L>ZWG=u=U`W{3z)Tqu`??g74BtAv;BBv
zGFJm}Egkh*qtm%2{VX}go;5r`i705%CnO?E$Mrdn1;!i++l<~!y_c;|$UyF1%U|F0
z#)a*XN{w9w9whrt@3Xo5=(u%J+EuuGlv-?|gIp!THOry~
z;w)3&GIc6=_O?cg%URrZ>4&kk3Z4%KZ#xeOcl<1JUNTkp7mmzI7ZG1<5hTJ5TJeQx
zT;M5A#)j{IBuLbu9g(JqBHLvca+NLA>*0~mW-j>txJojcHOaz8a*SNyNekmBYuV9j
zAdq}V!BcKQVmnA+v$-rhIjf)vJM9}wD2=;{d
zm|^yKOKL^IIkh4*n=Q1<;GjETzuPfm#!IH7YT(E(TdQWJWfA3A-3%dKwVtMpCdiro!TDb?Y?k}d%6DmP1?e=+E1$`+cX5Z+3~$7UM2vrl_L{==qf
z%8(`2W%jZ-j5zW7%*2Eoj1=D%p(~NMc0QcgArTVXoo%>6>8V8%kx*zZjJRGh_SemG
zRSHs8saE4JIbu$%3E=b1jB9)j9V+0w*_Ull5AWWQsUxj=#S#1ywVDUCCUwDl;s&c!
z?3eW`8Q8aqVFFde6)>hf4R6kP89Xya*$cUDLJxl*7b;nIx;qoMmqwKB;+2#q2^pFs
z_jU3ez&&;0EOnSJ8s1!;uPS|7Nx5%B*nM-ga6UmN^Kd3<{IJ>A0%|7Vu$ckpY2Lw0
z80_r=q#cw{3#9cZDZN}3(}Wb$W>JJT*#R6qE%$$DypXljQAV%6yrl(*l6t%lir+`RBV@!jX_uc#9nUw1
za^W;DgEJ$?*9EYcQGRlVEXSHoD-yg4CBN(?bOW&?NhwBw@(Lf!@N08sNxJ4k6Fj@Jh!olIsN%O&k|nB#+@`
z<5p(K^X?P2@3)t6TBSI^Jdu(s^I!reo>ovYg+dNy(GdtH@$)jsaUtgeG8PwDh^JXu
zg9Q+!iO*!!e2?)OOw=U?DM?Mzd!=9pxx&JGPz;40JKHZ)b06@*kP!BfT$hzu;m7ks
z4by6(8YIL|iB=1cfrS?K@p{1QtOB?y^%n4kF`bawV4`)}jc>$;e^ToiQJvs!skG1w
z+_}yl0wLgT5-~&4B4(jG4xiKH5Oa9P<~b(j>uc~n-K9yPcGh4b3#q5Ew)LgLkER*
z63g~s<
z`YT1d7}Qq;$Xwga5|HfsdZb}2PD89)i4_j0uEF2Tr!-7>YG{gePUeVt{DD&-@|J1?
zT2CEK&T`50oj-#h`{XP%#bn!zHPu@1zo@hzkX)mpqUW>gbmCBq`oXopCBe|?yfSMc
z>HLdA82Wg1)XJQfbT4=MT}`KS=lYZ~m4_&QVP`RTNYNMeiEk;2tsK`hAEqBkgpk2E
zEhAMCMnwmwyu}m-g9Hnl3NU%ro&H;8a%9F^!U||ljo`3jnk5cI^VRub>r}`gnH($I
zuNI7``kepEF*r<;yGzq|`?u*UwGuv!n7*@rHGOcy&x)^&2AyFw!WUQ&VH11__1{zE63i_&4}_M7RgCU6Vi+UgWwYOe-R
z#yY-tA@5&2Vha{X?W3%J;l>ix(o|>e#{jV={_*et7V$1JwPPaZv!Yg8sJN^&+edN*
z_Tp?fTYkp=XuY_bz?_2ZwLIcM8#BU}V8O1)Q2vg64iU=1Z+4ZtZ8S_cO(ulOs%@ht
zKuPE`$OSMUCiTpUg0T^5smq_5unjhd@9gF`Riy-$EQQi8_pS9_Xwjv|Ij{ekbby7D
zQ&aYz4l~pPa!WS!p!f0tr(A;044-EblylVT?X^TLHxLObvh^1K%i9W~&k?d@*cY82
zQXS`cfj2VPi}SuDMI~;f83vI-*lr_(|7-h6#N#r-+-tu%B*^u+7%Y=I3>x)e4Ipp?
z1%okf=x{S2#XLgctcY$GKZhVD5vZto$9
zc7Ram9^`*n+`Io^7R*fj#F7LfHal2iVW!KgXaKCNS2Pb(y)>|c5B9!{ya#u4HRBH1
z&nVITyV@f2lZd6lOT_GPdEBWF!VVa06M#(q<3>d7_4`-MH*b7ncNgn!7^088GS^{FOX>U&yDmc0#2l0Ns
zDP4I#Y52}x4Omrn4mFZ3^1P}{KTK(`NnY5p%XoZ^V$`OcC-#mp0)8vAkrr*9B*gE<
zJP6WaqX?;c3k++T%RYY4^TZ+&OONkELN^o*4`WxT>$vdPF`Ljl_HJlu%Ql8t%rUpdSY|^-2cba_0R;(DZ;uy#Hgt7|SkGa;ioCu*QwPyD
z)?UT)Cy68QArY7xFQ;P^h6>Kt8LN+21GOq=%E6}}2`
zk;zI6NR+knyOMp9+h-u1XEF!0(~HWA4~F{Qn=pp)xN=N_5B)k*)!Q;D6Jg-kbE=a4
z+*bv|_haZB(zJ?feo*X)q?+a;bdQ*&`(G07&?7*hR`A_Hm8x!-WWHXj>kQ$2Pq6v}
z2A&C#T=y+!P+XWZUl?uVyUC5)BrP0nN`diMxsy>Z70^%VdG5U~6q_D1^wdNzLT>cb
z6!4*Zy_)AYPGX>C7P0>g7B9I48DbglAY9n*rw9wshd;q3D2HjWGQw68VO*LaN35$&
z%$ei0t2|Sc7OU(c4x2uKwGz9!BW$m4BW5fmBhuox8T(5LM@_(TO*0rrz4^sZ4iiKZ
zU*(r`J}QA1*{Cg?ymm;^kY3A5h9Fj91)HZCcv5mxpCH6#mHH|hj=8&*bj3WJWlexq
zH}wU|fMYiUjAn|EEkB4(Plo{D3QLRN(*U!OH4ldaEY_a^w9b1g&NbYGzO{sNcAh~
zGX0+thvGBDvDD0>?HHKMo~)2@8Uyb|C_s_iA$v=gBi}V!zJZyBFn08zc4&r@LZ>y1
zgKlqdr~Fk8V-Mco#T;kNK_|qHAdIfH=*fCd#l~^}Tg>Hwti-+l&1^}|$a$wYiT0}6vjugfP+mr}d
zdLGP=X?0(pbFHnoPRrK=U&P3rb*7)KjfmMy7)#VdmG
zkroio7Idm<7IhXUA|Nzh>3p>nPWOS#KxX-R6Vei&*5OCmp+aJ)-3BgZ4m;A652ztB
zqrQ$9*?1Ln$MyV`vY@Z2p#P?GG6;{%oc-5;Ye3YZ6`voYl-0FDM!<>C9RPn{0pM)
zV-Vh3LQN)B$F74@HL(XE4^|vs@6+{J+@Sk1s5bST3)Om?7P<7}OAU&18Ok|1U>*7)
z)C!Mx%a5UfWJ3D@ycZQ#STFHZ<;O9X3Kt|I>^3l|iuI^l{Xza>3g6
z{*4h=W5AN;P@S)U8ECfx)3<;>bioY5*qCe4Hcgz%FgkclBew7=i4j?Fu0;ymkCPp9
zp{A}_rAT3myU$DM@&@3Z6Zi{IdRAL-%voO
zHY8l}Gi7=Uey1>2EVIJc+fYfIC{%Gm1YIeHwXxPp>bVN`On!$%8Ocz2AvIC)-?=Wm
z+YI%(=l6pVMvR4>`P@G7Y$;q3U*A$`!OUB}QRn4eV&O$wyDn=UtEn1;L?aoZ
zv2FV}>w^^(`8q1oF>-^1bW2(FS>NYZ#E{e5GPf=qS((n2H^t9Y0CRTZ*+JLNhXs20
zZ>190`!6)^Cwn{xHAV?dM185S(kzB%C*v`gkaWNfFq43h=0SA7Rbn*4L3!)>4opOQ
zdoDw3weC3vK||w%&385zmCOc8i;P@o&_?LaF38(%kh((V?$C0f{yK^Beyggb<=`
zB&2f!2C&p6RWu;Ls+hyf4?xaMS(@bkjy(Oin1g%o1`Ct9a
zTx{RXDFf4sBCTZ*q9VsEDc}(Y6>|rW_7rZr?o@St81EK>m$DkVQu6&3_rfou+8ugP
zfG5nsg{h%D@qiTdJ*Q!r;v4eiFV6y^ar0+{fR`~8d;DE+s`C4%nz6HiRznUmJ=qSR
zx;)xzU;Bz~85r$FSKUYI{B7}oT2GdHizykau^m3895q+)g+f0C-Yo|9RJ_djpykhtX4#T3f==wkd(tS(Mr_R@DbM
z9W_cHXKTX{_hrx^OPC2QR6(2M2X22qr2!|%OMGXvF(}CXKo|f0yoTS1gz-gHkad)=
zPpW8Df8bQ>{jh@}$(>b!F0s>tIl)d$e>tjGob!{?_e)63D?gL^X4I^hh`YJGV^vUdjN
zELt~H$^#JHT2)~8@GBH;N75dY^`U#w?e`RlvXNE<%ZIP7+(oPcds(uF$S7#|K<1qV
z(<#|1%-V9b`CmZAw`Y4}&Jtj|ETm#aCbMYQV(g8LU7qZyj{#rW5JUNu%RZC7T~VB9
z2q~bi{IuBwVzA+Chfav0F|Yl#0ScAp{`WdxT51->YtsUCj}Qf0{`-g_vxN
z2$|U6;}QS)Q-n=0T&6ISQwu`upPxkrMRyEWSO-!7N9!tav=krl6HEi7XIO!Af=9P`ipzNL9n~l(5uLi{8rT
zWW#QJvyB2+OefkHn;mbJzwMpMI5BzR{!ehlniH+js?Rg=P&~Qz{NMqi5g)Dgi4nm~
zRugLuKVDZz&>bpC$UmjG=6O7x09!u1cp-3CMLmkgzwOh}!p^0WFhOq_>c=H_x5o)aF^rIkWc%}s0=*+z3I0s=VKiv-WM6Rr86=vsw-
zgMe<|7feB0$=dB5{)^k>I*R&Uuu-tZA`xthgZCWtuE?o+-=hotCcQLhnwsv4%S#$|
z4JuawUTmOUNRw`(J^&E*ZS2Io+xQDXmFW&&M2b^ieC&I6;Fx?-I)tfnNZ=dwi^tAl
z3MN)3VwbrDj;HTwN9lp_icp}+H=@(6$|qz}4U
zq#d=3g@Y}-?I(>sS{cdYBTxccQgzgN<#bJQ=#vafD9_sOS63%%n+Ydl-^`TPK
z+@TPj3_D(xuKwjJ-B~|V%4bqPKOJ*=_2tGAbtZr_9~!SIIK={|zurx-ARr;}Y7n>~
z>&5WR1|h4|4{zKLo=2s!C0lZ(B|m)!u&$2|ok*uh&2%KOW{@v)^XmSZpz6X+ggGTuPx?Z*{&1QgKBstnaf&d81)II^)w%_>ERZu9x-
zAsQ6dhtbV{ra<>|c8iDZK-7T+jUq@Z&mLqD^C^AR}a`eM@x$}%L;kx9_(Prcf(e%uOuYilYs
zlaNhYfXcp3_E^wlc!FUd6EF!r3T7IY=eVSZwzF2LSyNCqVq(TX6l1+|czfKTe}gkA
z>iatORrVo4vmNlpLZZ)GFhyg>acQ?3b5NeufOrT}d}ViEEICxU?v-3^4n+~Io^`NE
z_Nhi`l-ck&K6B$Sp6hFg#vi8&xb{q5`_u<@tjFq--79w_VrLcTo}+bDWgZREx_9B4
zQcY%3*VF4>WZa0Q@_ufwFq|E`sxUNs1-IOhi`KjCFYdt?t)pj5w-?Rba$!PId5`p~
zMai(iv15gwm0l2c2O#}6FY%Ft;P-z8{C#(CiX%;-+2gP?GLNN|D3ZXv&pI
ze34rTOo{ZuCQyz!%o2JnbC8M-C@1mYDoCGV7w#hs7TUv|7YynOoyZgeMZdq$l}2AB
z0&bq^S*XniU_1
z4x{FfA^|)7^XMuyo#=Zkba&ALY6|RL9`7w+^(%(>ed?J+AzH#+U^XKyu+|;
ztiZa(Fc-6IZ@nMF&1UNL7>cEEDl~gtLqa30}KU&wNZmknH}F#Udp+5++hqB7qoE
z3-V~A_1Kz9GA8bA8D0d<8Y4sogiK~+-i;M5E#LVxFKl%N^EcXY^gIDKZ;L|fdk1vV
z>UN8&-c;|7YtJs%7!YxLjyAflaLOngOE(^7%W?h*>+g7c;Q(QrSAMPh?0xZh(OmfS;_kr+(-F>_PJk2M`y6p8I=i}bG7r4T{gv5%B4FyR
z9sTM9pL0)rrW=N#VMc9)CMDjEvB$I(-L_)Q5qb08V$%VwUhH;Uzee3
z4AW^+)$B=Bz0LAleYEmb>Q&I%y|JzfjTv84yEgWuwO)WiyTruZ6FuZf72E4(>b1*&
zTT^5Cx#RHAW5U9f;l1|@qa&k->9%Vz4(ay$RcVL7Y#i{Z%oUNrnNdj4AKh*#sJi13#U`La8jrf18Xqw
zAR7{fBD9ofDjBP8%UP>PqCMAYD%S_fnswV)hdz^x+t{ZujrVgZiO|(b1heN*!nr3!
zAig?zN~hd)&Cusgu{-#xjJsHDJhn4MwVqaY_RB}PtTuqV(;#!s_&N)K&M+4aJ7uLW
zAk=e8DCpLZnx`Qd`dCdKldw5OS3bc1N`-x~ik(^z1RJTV+|#>%iS>AADd`|!WrFBe
zXm2A{Ojm)`BFVkW-0=}TTFIW5Fz*HRF_e%9sdP?A}>ilckDO0!(!DIp0MnXo#w(|z!>Q1#v
zZJ4Xx6uaJ#>T4Tl
z1t3TPo@m#JaeJXTY{Rq&=o~X&zAuqaF#ZFOzniO(W`JI=7|t8(I=A}zZ-RNTH*HwefZy6
zJGw&Z!5nNmb{(5Ji=Fxi)`rT_n&gg-w=m|~TQamR`ikxRP*qo%i(_}Q`geK44bN7j
zMw;Gw){2Sd<@#22r_HgpWBD_g9hMt#>Smyn;Xo(5RXmRxzT++6hCu8N-e_`aM#DCV
zMft-|mNrbVrg@Nn=7A;REfurbMk+y27b&)Y4!Do#^!uHkx!Y52;}FcMnKF639ZOmb
z9WsFu556~L8Hrj=Z*aEsLdzqYUFsATsgn%OBvNKIoe4sE=!dq^Z`3-2&}E&0we#74
zqZ=YReuwWalic%uLx%}4EpLM^+NqdS>^Y?<(J$yP05qC@r#|Aq1I57_%pcYR}1Yj2FhFb(YBx-{@$Fr*N4*RN5k$ol>2Mw02HyA
z5m{;Be$^c5dDtD2LbC7G6$Gtid^=Q2~CCXCbvilEPQZms_=NKYlXmn1?DUN+Jgo4ye2
zRfD%r$C^%ax361>1+FJ`EKm`qPEh_vSF46){Z1T_&`j3?3qe@W0aEGv-clfCS86<7ikJsVosHu$Pgol(%KIvc_Yak6^Z
zy)9a0umRK#tB&ki4CywX?(Xj&zqP*W`}}dQWv%YL_h-1S^E%JtJdWc;Mb25m
z0hf!gyT|f`Oa$sb(woWKmF%1=q@^LQ?8@T&{a3Q@6b(}BBr%gvm&n1K`c7)KeC(-;
zaX=~i_7+ZT7w6G<-+~9|1I1vP*zLaT>-|DSdL?nUFcd&-4B~xT5Poku^yf0Vo*j`>
z+S*hJq`zXfCFfL}t>$hRdl+%ru>+y-z*jN53RfnhgzxC;5}(nW{t3cd&v{W6o{UqHC*z;f5YU=rlA6gBAlo
zGcGFabFOifKQ?EvDT_HWqY!|_kK+(Gt7y@W_FhLY9*Z{UB$W$5uF-eO4;_GhfLTgX
z%qDblw6L@rKtH7AomvHim;owZAip5mVj*6?G?N-@SA760^yA02_k@{L#V>x!WDPT2
z#1-W|{MQavb2lLkiq&002fGJ^zc^MY-XE%8GSU}5
zs`5B>3J1*H0wZGUSP_AgM*oHWH6Kfwopy%a1*pARgwXsL#1Wn}q5I54KwNEWJ0df?t?+hgdY=8Jaf{?j12S~?L@RlTJ8&jPs^g{!
z=v2`kz*omc;>RP~xd3i3>V+J;)jAkgwf;owX+wH1Qn^+Va;qov-~p2y5dK#?S1RdX
z)HHzMjIEX@Sg!<91iL1RK*!>33AoG1U|o{WF6I;4yG-H-jNH<^({#CatM8Zq)eWcC
zSWH`n{z0obVKTR8QI|?j*24_LXvgu@i3`=`th}93m?3MiPkkQmhZ1$N}j0dt3GGs!$IWwezgB-|L%n
zh3rBkz~=QM_qmM01WF
z!N>^>_C{`%t^LFvSIbAvBA}rC>ghWxP;Xi<1Ixb~kF-U^VJHyg
z{cRwwVT-5Mft9p350bB1Zr-7JuoK+1_$}WNm!+6JMFPUap1c*enI^4;zE|XhgH6yM
z-$YPW7ilTO-g(M;KI%j;I=MUU8PU7duby9%zikpDf7CQmehDh<)6
zrD>$xV%>dJXLWIspVf8GVq04e**o@Q_Pr;24h&FNLjAM!VeaEc5wmqfNdZi^d3|s5
zGkum>vMIVqAVToG2f@>lNR7#wDm%_vv3l1n8leMeRjGL{e=JxCdasbG4@h!yZevkL*h6VhCDy4@~+GXiAjo)Lx`_GA<&rtLW-ko_zivsI%ckx
z{2e|}%1yAPUbIB0KV2v7`awq
zeMrOncMm|Xqx+zN&;j~2Y?j5_DS@V4zgTE_z)+**1*(qs>4&3Tz}>I`eL+3*3G|jc
zo8A(<(H#wSc8&N-+oxwW7n*z|oi66u+G^HpuJ&%G0h+uB^jEWQBD9PqD5Z3Oit;u$
zRtuf{R?Y9?+fw2(GG-xtsBy5#qEExeC6&MIKT)r>&`p|9U)ER$^^P@2!4isRs}J_;
z>m3%>JkaK2oT`xmDR`+5(#&P~xz|@d7jY2h*L=`o(UxINFTOP0+dD-XWC%dZIepZm
z#jA^GR}qp51je>wP0*H;qX{7lH1!yLr%=gK88XE5dvH;E^HyOU@rt{DlV{WJ$iBO7
zAr^i1wm0YqB<;ENBNzG9@k;MQ7|W^&+qy{L(H6+Zd!Son<#Yi
zT8tacKY^U!j%g9Z%N-LUgZ1HOY*Vv50p`tbut}r0eLoGQ)|l1X@<#g0P?~nLrrI8y
zUlOKo*cg%7wDWQ)ty6*4zeN-7WN>UbyTe8ltq}h&;85lN05}9iXO5MP=U8LXBG|{0
zc$1(*^Whh~U#{$Q(dQECss?QXTtu6X3c6bzW!v)BJ4Yw;*%Wx$SplG&16y3Ucg~Ev
z_gwK!{e;Wpf;td5>H@0J?L-}vSKeG6WYo>M7X+*?A(dVfpC)N0Jp({?juU)_9U|je
zV`=IOy-0drmQ-G>h7Lg8LQ2u&I~1BOcwAc0&bLpheIrb!)dRx27$PFmGZP)ZSU>~@
z_M6MagpCP4u9J((d&)9E8~|gM4!V@mbMd{7HdOzl6OYA7FLjv4#_3pjtlrIm0!gF<
zm55f=j9;v5uxqv(ipzSp4rls3`(~0-3%$wLBniWUE|4)?>l5uxz9#eASvtrc_R)gC
zL-Q#yx-}kl1m5YegtaCOu!w8Nhi{D54*zs}R%X46Q$tVm_ZSpNxV6$j=x2Oba~fjR
z`gXes?vT?jJ?8oTC8?m(B|E(8W(
zldC4uX5rT(H7pbZ)NMW55Rb5cy1-?468gp0CfFVQ+1N#`UW1K(69_gBLbYJHR=vOE
zJGln%P9y~L_q$XKu~lnDOS$F22~M~Y90EPrcF2ycAiU?~S$Cb2{mV;6?keu_E@VSI
zRHI>QP;ggjxY+Hzm{yP%|_H^IwJNg<3=Z`(|4*gdgYGDz+|&h6;V6
zNje(`ck@)f-qa4j@AT?=YbQX+VVT$Bl<_DE;*#nFVwMR~YO8f$^@<)rT3}!$N*eEh
zoRatbp#9;$ez7^}Rwfcxs;sP_Ne^050Ll$G$1K=Ya(26Uq^Zl-QteG_;(?fzmf-(~jHfUYzoAYW3zy
z1u(0~&G%JvSvtXe6TrCb*xi<8c6z^aee~K0=g!A{;t&^HtSz#-zowKSlB`Ln@a)Zy
zqXe1XJhrb9lMLAOFEas9aE5j}HYtPRIVEwu<5`L9C13U?8HfHQ*cr>!@(-TCMS;Zf
z=a`+q?_9*bYb?kf8_3N@Vh_s`gs1DkYsn3Rb-bMP{02_1w`NoQVsTEX`l-dRD>^pwe
z;Q5_bFTGl6-Q>+XwgFo(4>$>309za&;sBRjg!HJu3AzH=oGuh0tXsAZv{pKhPA?|j
z1<{{_6)7UX1|o%P9_07mJ0^jJk%zb~0K6)QFkNavm5(&fbpbso6x446*eu3Wru6d2
zDhOwziA8tf9HgM%dBQLqDXOPqT0gv-z709(R1B)M72(Wl1HKs52F;z3Jr!&*?7$SL
zgLBj=#jw<)aaI(_0lFpb`CS=8b5M@^GDbR<0E_&W&|#9^&oatlk44(VfoaIQ@N?l{
zCy-&2P0J})Z}@Qz66qGH>31a(zP!?@B4!PT1zu%j;oaI65uG)s!l2Spc>9{&`4b7e
zDw&3*JwXp}C(cjJ=c_h}Z%lh`PEZ`Jg2ebO)5cat=R$ARIH%zyxkQ5TDzR92QzdRE
z3hIUH#4GeW!MNQBF(|ivGsY#{end23WC41^;+E(*L?MOFH0ZuyR&>I0tR(aNB9zgl
zZXlJQxLr3HCk+NW435F{110DCMCprR^H!yh7A&clU{=SqO703f3+FG*UNDr+D0@yI1{De3U}N*xkkC+}Iw!
z+mB`*AX$xRQhiqNabYP8T9+~8&5Shl>$raUh=@(k#iaucxG||1t23i>NwX=qJ8ydf
z3IvOLouoPb`uH!y^B@PTvcell*z$U;2#vLl77N&?i5nEYlsTYwCLxl8=k<%Ajmd$S
z-fCS4+3%ycMu8l?N)ETIakH08(&t5H$wX|aHb)UPZKDc^~`SUMgo&w
znI-rKID=HIz+9_V>LkhlW*>Xm@!%D2D%DlID)
zVu4$|fQY)S%OCF&FpDDPT)u;-Yj$fqA>yDRya+Y2FDCy1?Qtg+jt_5OJ5O-+xX&rG
zo&D;-lZ7e>zafx+OkgI8owSaS+d)i(T>=Y`RvjqnMpWrd5N1%i3~ML^EdHD)8a!$!~Fg2)>6<;~JH
z`qV@9P}Cp``tue_J{6@Lv2ftUPfVq%`Y!GDJOL*j+hh9$_Yj7q&-x{fE$?^G}Igrn>EpvK1@_`xpdwoMi8Y*OmM00S)t
z`m@_tsRw+d5&mu|`-K+q`}5xvy7Pd7(*@mLmbOgL+zB+q*0$Hq_n;hcfE7|!fVueL
zlgq2AF=SHZ~)X;e_=1V>d3>g6R|L}+pq5<)#Xc4F7F
z7oYp`&QaYOMQNo`YxBx2D0siOiKG0E^#uCUy1nNho8YR#_;mv=Y#KmW_#iy3JG|wKdq-*DK!Emg1w`bXUKxIWcG1uS%M_w8V{-$Y3N>
zvl3L_mJblVrr_N)?<(VZ&bO}3VZ_yz#=EL_VAt)+)zL|sPU9N|*kF6>~4I7DyX~}0%
zY$=qDy-gdZo@?7Dnib3YZaid1_JP6`pop1+M3I;%IKul
z+*RC7iLqmnFchQN=Qr#tRLf~!EwuxChO#-WS8n4|gY-_$v8r-i?G;V8iKwdzbMVqV
z0>iu1!3*L1$WZEyC$j){S^;q1e?Oryfm=D1S^J%KMBE_xg;Zfk=0bH;EeIkPBoSc^qSb
z=w=kkqKQ2PPA$zs{Dhiqr!N-W-{e_b$e1|>$BOghIF(OnBGvdR`q<*?si(|2|&^q{IU!GuZ!NTR<{y
zMrFhHKeK{%KV@znW!j>0d0AbYI45}J;yS$r)K7Mz0jPo}4?vNBjBlZ>3<9}AT`
ziZa)PU^Q+)wvh`^=}OD#WIu4Th)0iUdi4Pv{QfUl3??H!lOMA(q+5G1CDnd`%s1sHwPLZqo?l0&c5DIOak
zyz?91R&|e9^p?k>iZ>VBWR**E<_mA|3ms0CeV<{d&Z!=Y@##nPppA5u>#u|+zUNdSY+gZwN`^?$CoV|EIeb-CL9H^w460Afvon0s*vn$NMAB9KcLUH
z_Ca@*wK*(oA>S}^d@_fpR^^pP4{oH%I;eM77&G-)@;wJ
z>OA-jP4VhZ6rd8@MkKSw9MVcX)YK0f9_g4_DUW3?&Ahz25pt+}`oZSv<+iweB|V`f
zV{`BJm2SvhMmsxk_sjG(wdR-DTAczh(HxS#+wPw^tb2c>*=L3&d*!
zgxiSh3ek_`?|Yxov{Rt_(z)e$*~}@lp;YF?tv=C1Q7!TAeWJXl@RI~h0&hgWQABW3
zW-3N~`TRI_s(8s@$!UCS2e$j;Q4f|@sVh;uTE|cN@Sw9=F-J6wZuX1izAH;_UEN1a
zn-`gpuH?+1>zeN6%^HtwFQJa}xdGRr+ah1zbA2J>a?pZEKtI={1-J&kKk;AgkF+0+T8}(>rx%EmOScVCg
z!#ASTnxNJj3nW9<@NS~1U@iCnLtkp?91I3_qV;1HsQZW(7n3fklH5)&B@=X-GZhhC7>u+@
zh|sRw&4x;k^ekQXGvi-Zqh892vYXtw8!|HhEZ(|r{?av@vWJH5`^mVYFTXUL)=gHr
zDUcVnm+~Z)hwAS~(~Nnf0)`#hPaeJE663X+7d4@|@2KLbB%eCX&8K2S^3%ncrygHGt2r#PfQ|7rEUhOkm
z%eT(+1dEsJ5V4b@tNyVCTxc=P_d5NSKJQ!J@>Un@&ElrNvpSPs9zZX|dZn)wgIXB*
zY*UbBA`&_K+7L9BCKh|f=Dq{j&n+y~yTKJp$Q#$qo93G=1(zB`LCgS#NS>#$5tHq;
z!#x#S+if3SbiV|CEZK-vRr=3ZbWoG6stBdGp6{v5{TU--`yYAQbv++lZl$I5_2x)k
zr7kcPB)THX5vUSL{(U8}v5?Ny_!uu3ES4@K_Fy14PMm%*eg4oPhMj>GlH==XGtK~6
z>!G@5M2CN)S)!QtkoV%a?oITeXGoS~B|!5t&(Jr6
zJ`1VEesVdc6Jk;_oiL-KS!3Ihd0$d3xpN(9h(NZ;15hG=L;4J7#=WRBfkfN>%NWi!
zLo)Cw?$M8&5cQeU7&_MtCTDeTZWh8G`~f`{r$Mbeu*k8!Z^;89mKI=kKlf62XWWraM}-f$gz=r_quwcrq*>5fTI3CW^SsCMN~5LO
z7D+muEFz8VZqp0yO0^Su;d=Jh&tgS~aYGgH{T69{BRBLIFmj8bh7~wb=O6d%r~BdQ
zo#!fHt@VAB|U`~jD(HAn28g{G7S;-0r5K89bGHtF1_i?>bL6J1f5`9Yli;B
z#+ECd&2dW(#_FR6F^KeVhWpvE#Q>d~?lKeliQ@>6Y$wGcWV(G6$4V5)>(WgBIBZ%ux%3=Ck|D7>CA#Tbs=pqyc*M;}xKW
z>?nFLN5S~<9UChh*ZW8Yl((=X#p7{m0wK0U(H-(e$0G{pZ)n@*UUi7ShWGnv5in_x
z&~w6|7wE{7y!LqTF&cMMC8YyObg5ml#R3}4HcJ;-R5i7%zrGd{_>Ol|XIJ6Al6wb*
z&HZIC25cLh9BdInXZ?kG&5op276ff>XhH6kdMIrRsS|1W?&UbY>|(k09Z26f2zM^M
zxxL}OVhhWQ)>6j^!rHh_3(JfysKipSX+z*28sXkt?p2|&wb=Grt1s6abiXoU4sKXI
z2?;d^ce1Pkp+O<-K4+8Qv6;>F_)^(OA(JOKXz}CYQZqjt+OV?uy82~W7nD@q9ED>@
z<>LnaZrZRuE8MXLp>Lr?Ne4(gqagM20)6qxC&v28o}P~%Rh1Y&Sat`Qeg#>x&m9zL
zQ;2MNzg45#7z`TBhM-&?1Lhz^gBn-qmE3nOk2lWBE+N_?l2GE^-ty@SrNO_UkCpw5
zQvE1`o6Dk?Vk+RzuYb;`u*GYQmL(58oDLlj1aMw+IaSSdfs=$jg_xf>T!h5gKK?kG
zS!18tR?0is7mUfgMq{a5yosqsXu`UNh#GFbfu-}7nYuEH7CUX1kfyq3t_4~wme)2?>zn2!
zIiKZnv~l+(W0?;Ip?8N85$&NC7|V3KH;2Gdsz<+T7Ugce`6F5OZ8ed1p7(2d*6W*1
z?IeJ&^zW;;5gI!Q_fiMVifsVYWRzy6ceX5&7YTAPyskc|JXp80zNI&F_jGE#PkF8d
zEs9j@oQC~+UkOYg7GJw&e!4a74={`GXNswg7tYXc1<(``pC4kwI5g-Tt;kr>B`$`t
zafkGJ7vnh;Dc4|{w*xMqA+@J#-T1b9`VFfM{qzMY?rnqRvOAERMzpQP-tPN1WL*7OSiQEEg@G^yXlxMJ+JV@|m8(
zyBZNB##3o|`v^PV;e9*70mmq^6>$$nus#dXsfpTd|Mrlko07FG-pa{)xh}SI%HaSK
zNb0MYgz$3?9fekZs656B#xp+sG-$Tt^%7>4!fdRyWlJPyzgNT^Z;z2tKKm7TW8G*ppY;q63rF?4FIEiH~Y#
zX)GL$u>9$`R}xPMi!>#-)1%ikhE&jsEtu|!oP>uxh=jXM|F$;wOd#Ru3ZkL_BbdI$
zT|3DY@imo3_l=ju*OWx@<<@y7B0pY_nOc1R^!(C8A-(5fT_%t|7Z3BrJbBc+5nluf
zZNx_?Umh2O_L00;B8)Q}lfsuSs%ojDjGN5+zhn}grO|iwhy9UjOmpLQENm>bR68lV
zVkU$Mvz!|V{_jmRR58=elgU|xrFWO#6Yg+H8y0x!a>|9qI_)JGdo(GF<(v4_u8n-%
ziOmi1$TZ`QHf+mA28O!$xlMsWG7gzKY8P4n@yHq(&rXr*{?X|}aq|;vi0{VxmjtfyBPpjVok6KxU?`20ry~uR>Qs3g%59mir6n1>nZetH
zY~D)yPUE!ZwvWVj&tiMS#Bu-8h|0ZO1&Bq#!7KDnPg|wZKj~*>*m{qdgw}EdY&V03
z5Zsk`otaIxRnQoHOXIE<_r;rHwz|zrahd9#@RNrEF1x@H?`K`QzKEyKpbrdY7dlGX
z=~S@xvqrhax8)WsHWxQP`VA5E)E0zk9oOWm=$Af_qJyvF@2bSB#?s_j0M5CaB2tI2
zv6r&7pqxekRB1>*+En7T(Y!4sV|li=4bus#u?AQB7onEy1Og_7cYUXpuU2RmAi;%q
z(UTgsm{_*l8iB$y?f7~!@OegCElWYHQpj5bEk=hHoAnU|PI?G;lUm)cXAve?7wo&f
zDfN#lNM~fd$?a5ReM-z2@B)W}i2#-t@_zc+D!7+cABAr#lnd2f_}YSGF{$bkY|mnz
zS0|SfQ*=DMbSEPL5+3ehCTEqGf^@QEcQWs(jENa9JHsT+wq(N+`VwkZ>e48JHcQJ9
zuB5tnOPeActQ^R`3sz=U012>AD3$|6LegbVF%iA47wce{q%+Nw3x0oP`lZ7aT+{+a
zsaTMn*XkGcnA5wh4ttXMwceNgN{w`;X?^@4=uU4$D8%N&tk=^s4ct#gNr<`hU-W^!
zl+|T7V@cU6Ulx{)ts;k`?J>}SKfU`@TVmSV?6Y6uQ{wcmTU-DjgQVC(gWHV}nV%?c
zf@C+Z9vZzq8RZ|fq>5&;%gyPtEI6?AM|s~5!EWeQ>c7y7cwi!uaa~Z)uSt7wFleke
zNo+!1v;KDXV9GclBGY?pLV_sC^NOH$F5-==*wq
zS;>7{vQxj5SA$Cbp+K6JwfK2h#gl&j3gA|wpKP}3+O!bzYyN3UuaY&ncg_HF_o}@
zx4135P_u|j+LGx_?Z+>f5)74qjg<2b?Dk-ZZ)N>;joda{5!+Im0S`aaql%Fl5YjC@
zmS*&e)%P?|Uf`ys?xFUg=%4ZZ;9^KtD0Wdn)B$}9c^g)db!F+bc1XRnJ^RQ5b19=P;p86^G@?r9qJurL-|cWVz+PAUr=9Gxzr6I
zqX+GJlY;R)l4K`=qt26*J4cWq)VzUDDiXj|2Cod7frX`ygQVyttxxyT?n!c?{F|jb
z0MP#!y0Ya?9$AA<2W7wTl{r#+kl=U-?IRkw6k4|cb!ab4xtcCZ~CLC2&>$e~Fl+u4peQTwm-mVirSotvLawg=~ww_V@dIa4|pzcSS
zcwqwi7T{GUyfCXg%~Wi{+)Rl`X$
z{u?Nf^UZG?HX*6Y5s~gq;0BDBcuRFTu#%-lsdw=f5$Dufz6DuJ8(|8at-&I;Ms(5k
z`KlXVketu(`5hwNuko3Qo1h-iE7L){65EXf+!={rmAndUg>Fpq#ES2u@1LE+a%pBF
zTERLpeeRf~=7|Ym&eR_9$of}3OcJr>FFDkE?wSOj7R0r(^fU{h(r%6%xI0&J>zcM#
z9im+Hd?P;6)^_mDIp?P0rFUD>2)IpXejvsxsn8hLxmV!ggGKGfJk!6_U2Y-aTbE>;
zB~7N9Fv=L*idJ7rYRI5kaQir@Po7*t_@%x2(0+6Xby#pW{~HhTJWV8wEH*Q{aN|0Q
zYVH?sXD++}DN`|!mpzUTh-%