Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@
# Models API
#===========================================

# Cerebras API (https://chat.cerebras.ai/)
# CEREBRAS_API_KEY="Fill your API key here"

# OpenAI API (https://platform.openai.com/signup)
# OPENAI_API_KEY="Fill your API key here"

Expand Down
363 changes: 223 additions & 140 deletions camel/agents/chat_agent.py

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions camel/configs/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
from .base_config import BaseConfig
from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
from .cerebras_config import CEREBRAS_API_PARAMS, CerebrasConfig
from .cohere_config import COHERE_API_PARAMS, CohereConfig
from .cometapi_config import COMETAPI_API_PARAMS, CometAPIConfig
from .crynux_config import CRYNUX_API_PARAMS, CrynuxConfig
Expand Down Expand Up @@ -93,6 +94,8 @@
'SAMBA_CLOUD_API_PARAMS',
'TogetherAIConfig',
'TOGETHERAI_API_PARAMS',
'CerebrasConfig',
'CEREBRAS_API_PARAMS',
'CohereConfig',
'COHERE_API_PARAMS',
'CometAPIConfig',
Expand Down
96 changes: 96 additions & 0 deletions camel/configs/cerebras_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from __future__ import annotations

from typing import Dict, Optional, Sequence, Union

from camel.configs.base_config import BaseConfig


class CerebrasConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using Cerebras
    compatibility.

    Reference: https://inference-docs.cerebras.ai/resources/openai

    Args:
        temperature (float, optional): Sampling temperature to use, between
            :obj:`0` and :obj:`2`. Higher values make the output more random,
            while lower values make it more focused and deterministic.
            (default: :obj:`None`)
        top_p (float, optional): An alternative to sampling with temperature,
            called nucleus sampling, where the model considers the results of
            the tokens with top_p probability mass. So :obj:`0.1` means only
            the tokens comprising the top 10% probability mass are considered.
            (default: :obj:`None`)
        response_format (object, optional): An object specifying the format
            that the model must output. Setting to {"type": "json_object"}
            enables JSON mode, which guarantees the message the model
            generates is valid JSON. (default: :obj:`None`)
        stream (bool, optional): If True, partial message deltas will be sent
            as data-only server-sent events as they become available.
            (default: :obj:`None`)
        stop (str or list, optional): Up to :obj:`4` sequences where the API
            will stop generating further tokens. (default: :obj:`None`)
        max_tokens (int, optional): The maximum number of tokens to generate
            in the chat completion. The total length of input tokens and
            generated tokens is limited by the model's context length.
            (default: :obj:`None`)
        user (str, optional): A unique identifier representing your end-user,
            which can help the provider monitor and detect abuse.
            (default: :obj:`None`)
        tools (list[FunctionTool], optional): A list of tools the model may
            call. Currently, only functions are supported as a tool. Use this
            to provide a list of functions the model may generate JSON inputs
            for. A max of 128 functions are supported.
        tool_choice (Union[dict[str, str], str], optional): Controls which (if
            any) tool is called by the model. :obj:`"none"` means the model
            will not call any tool and instead generates a message.
            :obj:`"auto"` means the model can pick between generating a
            message or calling one or more tools. :obj:`"required"` means the
            model must call one or more tools. Specifying a particular tool
            via {"type": "function", "function": {"name": "my_function"}}
            forces the model to call that tool. :obj:`"none"` is the default
            when no tools are present. :obj:`"auto"` is the default if tools
            are present.
        reasoning_effort (str, optional): A parameter specifying the level of
            reasoning used by reasoning-capable model types. Valid values are
            :obj:`"low"`, :obj:`"medium"`, or :obj:`"high"`. If set, it is
            only applied to model types that support it; if not provided or
            if the model type does not support it, this parameter is ignored.
            (default: :obj:`None`)
    """

    temperature: Optional[float] = None
    top_p: Optional[float] = None
    stream: Optional[bool] = None
    stop: Optional[Union[str, Sequence[str]]] = None
    max_tokens: Optional[int] = None
    response_format: Optional[Dict] = None
    user: Optional[str] = None
    tool_choice: Optional[Union[Dict[str, str], str]] = None
    reasoning_effort: Optional[str] = None


# Names of all parameters declared on CerebrasConfig.
CEREBRAS_API_PARAMS = set(CerebrasConfig.model_fields)
2 changes: 2 additions & 0 deletions camel/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from .azure_openai_model import AzureOpenAIModel
from .base_audio_model import BaseAudioModel
from .base_model import BaseModelBackend
from .cerebras_model import CerebrasModel
from .cohere_model import CohereModel
from .cometapi_model import CometAPIModel
from .crynux_model import CrynuxModel
Expand Down Expand Up @@ -71,6 +72,7 @@
'GroqModel',
'StubModel',
'ZhipuAIModel',
'CerebrasModel',
'CohereModel',
'CometAPIModel',
'ModelFactory',
Expand Down
83 changes: 83 additions & 0 deletions camel/models/cerebras_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
from typing import Any, Dict, Optional, Union

from camel.configs import CerebrasConfig
from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import (
BaseTokenCounter,
api_keys_required,
)


class CerebrasModel(OpenAICompatibleModel):
    r"""LLM API served by Cerebras in a unified
    OpenAICompatibleModel interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`.
            If :obj:`None`, :obj:`CerebrasConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the Cerebras service. If not provided, falls back to the
            ``CEREBRAS_API_KEY`` environment variable. (default: :obj:`None`)
        url (Optional[str], optional): The url to the Cerebras service. If
            not provided, falls back to the ``CEREBRAS_API_BASE_URL``
            environment variable or the public Cerebras endpoint.
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, the parent class default is
            used. (default: :obj:`None`)
        timeout (Optional[float], optional): The timeout value in seconds for
            API calls. If not provided, will fall back to the MODEL_TIMEOUT
            environment variable or default to 180 seconds.
            (default: :obj:`None`)
        max_retries (int, optional): Maximum number of retries for API calls.
            (default: :obj:`3`)
        **kwargs (Any): Additional arguments to pass to the client
            initialization.
    """

    @api_keys_required([("api_key", "CEREBRAS_API_KEY")])
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
        max_retries: int = 3,
        **kwargs: Any,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = CerebrasConfig().as_dict()
        api_key = api_key or os.environ.get("CEREBRAS_API_KEY")
        url = url or os.environ.get(
            "CEREBRAS_API_BASE_URL", "https://api.cerebras.ai/v1"
        )
        # Explicit None check: a caller-supplied timeout of 0 (or 0.0) is
        # falsy, so `timeout or ...` would silently override it with the
        # environment/default value.
        if timeout is None:
            timeout = float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict,
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
            max_retries=max_retries,
            **kwargs,
        )
2 changes: 2 additions & 0 deletions camel/models/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from camel.models.aws_bedrock_model import AWSBedrockModel
from camel.models.azure_openai_model import AzureOpenAIModel
from camel.models.base_model import BaseModelBackend
from camel.models.cerebras_model import CerebrasModel
from camel.models.cohere_model import CohereModel
from camel.models.cometapi_model import CometAPIModel
from camel.models.crynux_model import CrynuxModel
Expand Down Expand Up @@ -89,6 +90,7 @@ class ModelFactory:
ModelPlatformType.AZURE: AzureOpenAIModel,
ModelPlatformType.ANTHROPIC: AnthropicModel,
ModelPlatformType.GROQ: GroqModel,
ModelPlatformType.CEREBRAS: CerebrasModel,
ModelPlatformType.COMETAPI: CometAPIModel,
ModelPlatformType.NEBIUS: NebiusModel,
ModelPlatformType.LMSTUDIO: LMStudioModel,
Expand Down
4 changes: 4 additions & 0 deletions camel/societies/workforce/events.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ class WorkforceEventBase(BaseModel):
default_factory=lambda: datetime.now(timezone.utc)
)

class LogEvent(WorkforceEventBase):
    r"""Workforce event carrying a free-form log message with a severity
    level."""

    # Discriminator fixed to "log" so consumers can dispatch on event_type.
    event_type: Literal["log"] = "log"
    # Human-readable log text.
    message: str
    # Severity of the message; note "success" in addition to the usual levels.
    level: Literal["info", "warning", "error", "success"]

class WorkerCreatedEvent(WorkforceEventBase):
event_type: Literal["worker_created"] = "worker_created"
Expand Down
99 changes: 95 additions & 4 deletions camel/societies/workforce/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,99 @@

from pydantic import BaseModel, Field, field_validator

# Role names too generic to identify an agent's actual purpose; membership
# here triggers fallback logic during role identification, and keeps these
# names from being used as workflow folder names.
GENERIC_ROLE_NAMES = frozenset(
    {'assistant', 'agent', 'user', 'system', 'worker', 'helper'}
)


def is_generic_role_name(role_name: str) -> bool:
    r"""Report whether a role name is too generic to be useful.

    A generic role name (e.g. "assistant", "agent") carries no information
    about what the agent actually does. Callers use this predicate to decide
    when to fall back to a more specific identifier, such as an LLM-generated
    agent title or a description. The comparison is case-insensitive.

    Args:
        role_name (str): The role name to test.

    Returns:
        bool: True if the role name is generic, False otherwise.

    Example:
        >>> is_generic_role_name("assistant")
        True
        >>> is_generic_role_name("data_analyst")
        False
        >>> is_generic_role_name("AGENT")
        True
    """
    normalized = role_name.lower()
    return normalized in GENERIC_ROLE_NAMES


class WorkflowMetadata(BaseModel):
    r"""Pydantic model for workflow metadata tracking.

    This model defines the formal schema for workflow metadata that tracks
    versioning, timestamps, and contextual information about saved workflows.
    Used to maintain workflow history and enable proper version management.
    """

    # Session that produced this workflow.
    session_id: str = Field(
        description="Session identifier for the workflow execution"
    )
    # Filesystem location of the stored workflow.
    working_directory: str = Field(
        description="Directory path where the workflow is stored"
    )
    # Timestamps are stored as ISO-format strings, not datetime objects.
    created_at: str = Field(
        description="ISO timestamp when workflow was first created"
    )
    updated_at: str = Field(
        description="ISO timestamp of last modification to the workflow"
    )
    # Starts at 1; incremented on each update when versioning is enabled.
    workflow_version: int = Field(
        default=1, description="Version number, increments on updates"
    )
    # UUID (as a string) of the authoring/updating agent.
    agent_id: str = Field(
        description="UUID of the agent that created/updated the workflow"
    )
    # Size of the recorded conversation.
    message_count: int = Field(
        description="Number of messages in the workflow conversation"
    )


class WorkflowConfig(BaseModel):
    r"""Configuration for workflow memory management.

    Centralizes all workflow-related configuration options to avoid scattered
    settings across multiple files and methods.
    """

    # Retention cap per role folder; older workflows beyond this are
    # presumably pruned — confirm against the saving logic.
    max_workflows_per_role: int = Field(
        default=100,
        description="Maximum number of workflows to keep per role folder",
    )
    # Appended to filenames so workflow files are distinguishable.
    workflow_filename_suffix: str = Field(
        default="_workflow",
        description="Suffix appended to workflow filenames",
    )
    # Root folder under which per-role workflow folders live.
    workflow_folder_name: str = Field(
        default="workforce_workflows",
        description="Base folder name for storing workflows",
    )
    # When True, WorkflowMetadata.workflow_version is tracked across updates.
    enable_versioning: bool = Field(
        default=True,
        description="Whether to track workflow versions",
    )
    # Cap on how many stored workflow files are read back in by default.
    default_max_files_to_load: int = Field(
        default=3,
        description="Default maximum number of workflow files to load",
    )


class WorkerConf(BaseModel):
r"""The configuration of a worker."""
Expand Down Expand Up @@ -162,8 +255,7 @@ class TaskAnalysisResult(BaseModel):

# Common fields - always populated
reasoning: str = Field(
description="Explanation for the analysis result or recovery "
"decision"
description="Explanation for the analysis result or recovery decision"
)

recovery_strategy: Optional[RecoveryStrategy] = Field(
Expand Down Expand Up @@ -625,8 +717,7 @@ def wrapper(self, *args, **kwargs):
# This should not be reached, but just in case
if handle_exceptions:
logger.error(
f"Unexpected failure in {func.__name__}: "
f"{last_exception}"
f"Unexpected failure in {func.__name__}: {last_exception}"
)
return None
else:
Expand Down
Loading
Loading