diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 581008d3f1b9d..ea4cf89eb4e7c 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -1197,6 +1197,9 @@ def get_weather(location: str) -> str:
     "name": "example-mcp"}]``
     """
 
+    include_response_headers: bool = False
+    """Whether to include response headers in the output message's ``response_metadata``."""
+
     @property
     def _llm_type(self) -> str:
         """Return type of chat model."""
@@ -1315,12 +1318,26 @@ def _create(self, payload: dict) -> Any:
         else:
             return self._client.messages.create(**payload)
 
+    def _create_with_raw_response(self, payload: dict) -> Any:
+        if "betas" in payload:
+            return self._client.beta.messages.with_raw_response.create(**payload)
+        else:
+            return self._client.messages.with_raw_response.create(**payload)
+
     async def _acreate(self, payload: dict) -> Any:
         if "betas" in payload:
             return await self._async_client.beta.messages.create(**payload)
         else:
             return await self._async_client.messages.create(**payload)
 
+    async def _acreate_with_raw_response(self, payload: dict) -> Any:
+        if "betas" in payload:
+            return await self._async_client.beta.messages.with_raw_response.create(
+                **payload
+            )
+        else:
+            return await self._async_client.messages.with_raw_response.create(**payload)
+
     def _stream(
         self,
         messages: list[BaseMessage],
@@ -1341,6 +1358,10 @@ def _stream(
                 and not _documents_in_params(payload)
                 and not _thinking_in_params(payload)
             )
+            headers = {}
+            if self.include_response_headers and hasattr(stream, "response"):
+                headers = dict(stream.response.headers)
+
             block_start_event = None
             for event in stream:
                 msg, block_start_event = _make_message_chunk_from_anthropic_event(
@@ -1350,6 +1371,10 @@ def _stream(
                     block_start_event=block_start_event,
                 )
                 if msg is not None:
+                    if headers:
+                        # Attach to first chunk only to avoid duplication on merge
+                        msg.response_metadata["headers"] = headers
+                        headers = {}
                     chunk = ChatGenerationChunk(message=msg)
                     if run_manager and isinstance(msg.content, str):
                         run_manager.on_llm_new_token(msg.content, chunk=chunk)
@@ -1377,6 +1402,10 @@ async def _astream(
                 and not _documents_in_params(payload)
                 and not _thinking_in_params(payload)
             )
+            headers = {}
+            if self.include_response_headers and hasattr(stream, "response"):
+                headers = dict(stream.response.headers)
+
             block_start_event = None
             async for event in stream:
                 msg, block_start_event = _make_message_chunk_from_anthropic_event(
@@ -1386,6 +1415,10 @@ async def _astream(
                     block_start_event=block_start_event,
                 )
                 if msg is not None:
+                    if headers:
+                        # Attach to first chunk only to avoid duplication on merge
+                        msg.response_metadata["headers"] = headers
+                        headers = {}
                     chunk = ChatGenerationChunk(message=msg)
                     if run_manager and isinstance(msg.content, str):
                         await run_manager.on_llm_new_token(msg.content, chunk=chunk)
@@ -1393,7 +1426,9 @@ async def _astream(
         except anthropic.BadRequestError as e:
             _handle_anthropic_bad_request(e)
 
-    def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
+    def _format_output(
+        self, data: Any, headers: Optional[dict] = None, **kwargs: Any
+    ) -> ChatResult:
         data_dict = data.model_dump()
         content = data_dict["content"]
 
@@ -1418,6 +1453,13 @@ def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         }
         if "model" in llm_output and "model_name" not in llm_output:
             llm_output["model_name"] = llm_output["model"]
+
+        # Only include response_metadata when headers are present
+        response_metadata = {}
+        if headers:
+            response_metadata = llm_output.copy()
+            response_metadata["headers"] = headers
+
         if (
             len(content) == 1
             and content[0]["type"] == "text"
@@ -1432,6 +1474,11 @@ def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
             )
         else:
             msg = AIMessage(content=content)
+
+        # Set response metadata if headers are present
+        if response_metadata:
+            msg.response_metadata = response_metadata
+
         msg.usage_metadata = _create_usage_metadata(data.usage)
         return ChatResult(
             generations=[ChatGeneration(message=msg)],
@@ -1452,10 +1499,16 @@ def _generate(
             return generate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
         try:
-            data = self._create(payload)
+            if self.include_response_headers:
+                raw_response = self._create_with_raw_response(payload)
+                data = raw_response.parse()
+                headers = dict(raw_response.headers)
+            else:
+                data = self._create(payload)
+                headers = {}
         except anthropic.BadRequestError as e:
             _handle_anthropic_bad_request(e)
-        return self._format_output(data, **kwargs)
+        return self._format_output(data, headers=headers, **kwargs)
 
     async def _agenerate(
         self,
@@ -1471,10 +1524,16 @@ async def _agenerate(
             return await agenerate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
         try:
-            data = await self._acreate(payload)
+            if self.include_response_headers:
+                raw_response = await self._acreate_with_raw_response(payload)
+                data = raw_response.parse()
+                headers = dict(raw_response.headers)
+            else:
+                data = await self._acreate(payload)
+                headers = {}
         except anthropic.BadRequestError as e:
             _handle_anthropic_bad_request(e)
-        return self._format_output(data, **kwargs)
+        return self._format_output(data, headers=headers, **kwargs)
 
     def _get_llm_for_structured_output_when_thinking_is_enabled(
         self,
diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index c40c5a4ab0a1b..579d7a0894543 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -1082,3 +1082,67 @@ def test_files_api_pdf(block_format: str) -> None:
         ],
     }
     _ = llm.invoke([input_message])
+
+
+def test_anthropic_response_headers() -> None:
+    """Test ChatAnthropic response headers."""
+    chat_anthropic = ChatAnthropic(model=MODEL_NAME, include_response_headers=True)
+    query = "I'm Pickle Rick"
+    result = chat_anthropic.invoke(query)
+    headers = result.response_metadata["headers"]
+    assert headers
+    assert isinstance(headers, dict)
+    # Check for common HTTP headers
+    assert any(
+        key.lower() in ["content-type", "request-id", "x-request-id"]
+        for key in headers.keys()
+    )
+
+    # Stream
+    full: Optional[BaseMessageChunk] = None
+    for chunk in chat_anthropic.stream(query):
+        full = chunk if full is None else full + chunk
+    assert isinstance(full, AIMessage)
+    headers = full.response_metadata["headers"]
+    assert headers
+    assert isinstance(headers, dict)
+    assert any(
+        key.lower() in ["content-type", "request-id", "x-request-id"]
+        for key in headers.keys()
+    )
+
+
+async def test_anthropic_response_headers_async() -> None:
+    """Test ChatAnthropic response headers for async methods."""
+    chat_anthropic = ChatAnthropic(model=MODEL_NAME, include_response_headers=True)
+    query = "I'm Pickle Rick"
+    result = await chat_anthropic.ainvoke(query)
+    headers = result.response_metadata["headers"]
+    assert headers
+    assert isinstance(headers, dict)
+    assert any(
+        key.lower() in ["content-type", "request-id", "x-request-id"]
+        for key in headers.keys()
+    )
+
+    # Stream
+    full: Optional[BaseMessageChunk] = None
+    async for chunk in chat_anthropic.astream(query):
+        full = chunk if full is None else full + chunk
+    assert isinstance(full, AIMessage)
+    headers = full.response_metadata["headers"]
+    assert headers
+    assert isinstance(headers, dict)
+    assert any(
+        key.lower() in ["content-type", "request-id", "x-request-id"]
+        for key in headers.keys()
+    )
+
+
+def test_anthropic_no_response_headers_by_default() -> None:
+    """Test that headers are not included by default."""
+    chat_anthropic = ChatAnthropic(model=MODEL_NAME)
+    query = "I'm Pickle Rick"
+    result = chat_anthropic.invoke(query)
+    # assert no response headers if include_response_headers is not set
+    assert "headers" not in result.response_metadata
diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
index 2d418b6bddda6..c9f0343f9ae15 100644
--- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
@@ -1056,3 +1056,125 @@ def mock_create(*args: Any, **kwargs: Any) -> Message:
     # Test headers are correctly propagated to request
     payload = llm._get_request_payload([input_message])
     assert payload["mcp_servers"][0]["authorization_token"] == "PLACEHOLDER"
+
+
+def test_chat_anthropic_include_response_headers_initialization() -> None:
+    """Test ChatAnthropic include_response_headers initialization."""
+    # Default should be False
+    llm = ChatAnthropic(model="claude-3-sonnet-20240229")
+    assert llm.include_response_headers is False
+
+    # Explicit setting should work
+    llm_with_headers = ChatAnthropic(
+        model="claude-3-sonnet-20240229", include_response_headers=True
+    )
+    assert llm_with_headers.include_response_headers is True
+
+
+def test_chat_anthropic_invoke_without_response_headers() -> None:
+    """Test that headers are not included when include_response_headers=False."""
+    llm = ChatAnthropic(model="claude-3-sonnet-20240229")
+
+    mock_response = Message(
+        id="msg_123",
+        content=[TextBlock(type="text", text="Hello")],
+        model="claude-3-sonnet-20240229",
+        role="assistant",
+        stop_reason="end_turn",
+        stop_sequence=None,
+        type="message",
+        usage=Usage(input_tokens=10, output_tokens=5),
+    )
+
+    with patch.object(llm, "_client") as mock_client:
+        mock_client.messages.create.return_value = mock_response
+
+        result = llm.invoke("Hello")
+
+        # headers should not be in response_metadata if include_response_headers not set
+        assert "headers" not in result.response_metadata
+
+        # Verify client was called without raw_response
+        assert mock_client.messages.create.called
+        assert not mock_client.messages.with_raw_response.create.called
+
+
+def test_chat_anthropic_invoke_with_response_headers() -> None:
+    """Test that headers are included when include_response_headers=True."""
+    llm = ChatAnthropic(model="claude-3-sonnet-20240229", include_response_headers=True)
+
+    mock_response = Message(
+        id="msg_123",
+        content=[TextBlock(type="text", text="Hello")],
+        model="claude-3-sonnet-20240229",
+        role="assistant",
+        stop_reason="end_turn",
+        stop_sequence=None,
+        type="message",
+        usage=Usage(input_tokens=10, output_tokens=5),
+    )
+
+    # Mock raw response with headers
+    mock_raw_response = MagicMock()
+    mock_raw_response.parse.return_value = mock_response
+    mock_raw_response.headers = {
+        "content-type": "application/json",
+        "request-id": "req_123",
+    }
+
+    with patch.object(llm, "_client") as mock_client:
+        mock_client.messages.with_raw_response.create.return_value = mock_raw_response
+
+        result = llm.invoke("Hello")
+
+        # headers should be in response_metadata if include_response_headers is True
+        assert "headers" in result.response_metadata
+        headers = result.response_metadata["headers"]
+        assert headers["content-type"] == "application/json"
+        assert headers["request-id"] == "req_123"
+
+        # Verify client was called with raw_response
+        assert mock_client.messages.with_raw_response.create.called
+
+
+async def test_chat_anthropic_ainvoke_with_response_headers() -> None:
+    """Test headers included in async invoke when include_response_headers=True."""
+    llm = ChatAnthropic(model="claude-3-sonnet-20240229", include_response_headers=True)
+
+    mock_response = Message(
+        id="msg_123",
+        content=[TextBlock(type="text", text="Hello")],
+        model="claude-3-sonnet-20240229",
+        role="assistant",
+        stop_reason="end_turn",
+        stop_sequence=None,
+        type="message",
+        usage=Usage(input_tokens=10, output_tokens=5),
+    )
+
+    # Mock raw response with headers
+    mock_raw_response = MagicMock()
+    mock_raw_response.parse.return_value = mock_response
+    mock_raw_response.headers = {
+        "content-type": "application/json",
+        "request-id": "req_456",
+    }
+
+    with patch.object(llm, "_async_client") as mock_client:
+        # Create an async mock for the return value
+        from unittest.mock import AsyncMock
+
+        mock_client.messages.with_raw_response.create = AsyncMock(
+            return_value=mock_raw_response
+        )
+
+        result = await llm.ainvoke("Hello")
+
+        # headers should be in response_metadata if include_response_headers is True
+        assert "headers" in result.response_metadata
+        headers = result.response_metadata["headers"]
+        assert headers["content-type"] == "application/json"
+        assert headers["request-id"] == "req_456"
+
+        # Verify async client was called with raw_response
+        assert mock_client.messages.with_raw_response.create.called
diff --git a/libs/partners/anthropic/uv.lock b/libs/partners/anthropic/uv.lock
index 7f158b98f7ab8..dc495d5e0e725 100644
--- a/libs/partners/anthropic/uv.lock
+++ b/libs/partners/anthropic/uv.lock
@@ -1,4 +1,5 @@
 version = 1
+revision = 1
 requires-python = ">=3.9"
 resolution-markers = [
     "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -504,7 +505,7 @@ typing = [
 
 [[package]]
 name = "langchain-core"
-version = "0.3.63"
+version = "0.3.65"
 source = { editable = "../../core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -519,7 +520,7 @@ dependencies = [
 [package.metadata]
 requires-dist = [
     { name = "jsonpatch", specifier = ">=1.33,<2.0" },
-    { name = "langsmith", specifier = ">=0.1.126,<0.4" },
+    { name = "langsmith", specifier = ">=0.3.45,<0.4" },
     { name = "packaging", specifier = ">=23.2,<25" },
     { name = "pydantic", specifier = ">=2.7.4" },
     { name = "pyyaml", specifier = ">=5.3" },
@@ -607,7 +608,7 @@ typing = [
 
 [[package]]
 name = "langsmith"
-version = "0.3.37"
+version = "0.3.45"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "httpx" },
@@ -618,9 +619,9 @@ dependencies = [
     { name = "requests-toolbelt" },
     { name = "zstandard" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/7b/d0/98daffe57c57c2f44c5d363df5004d8e530b8c9b15751f451d273fd1d4c8/langsmith-0.3.37.tar.gz", hash = "sha256:d49d9a12d24d3984d5b3e2b5915b525b4a29a4706ea9cadde43c980fba43fab0", size = 344645 }
+sdist = { url = "https://files.pythonhosted.org/packages/be/86/b941012013260f95af2e90a3d9415af4a76a003a28412033fc4b09f35731/langsmith-0.3.45.tar.gz", hash = "sha256:1df3c6820c73ed210b2c7bc5cdb7bfa19ddc9126cd03fdf0da54e2e171e6094d", size = 348201 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/50/f2/5700dbeec7dca0aa57a6ed2f472fa3a323b46c85ab2bc446b2c7c8fb599e/langsmith-0.3.37-py3-none-any.whl", hash = "sha256:bdecca4eb48ba1799e821a33dbdca318ab202faa71a5bfa7d2358be6c3fd7eeb", size = 359308 },
+    { url = "https://files.pythonhosted.org/packages/6a/f4/c206c0888f8a506404cb4f16ad89593bdc2f70cf00de26a1a0a7a76ad7a3/langsmith-0.3.45-py3-none-any.whl", hash = "sha256:5b55f0518601fa65f3bb6b1a3100379a96aa7b3ed5e9380581615ba9c65ed8ed", size = 363002 },
 ]
 
 [[package]]