Skip to content

pydantic_ai.toolsets

AbstractToolset

Bases: ABC, Generic[AgentDepsT]

A toolset is a collection of tools that can be used by an agent.

It is responsible for:

  • Listing the tools it contains
  • Validating the arguments of the tools
  • Calling the tools

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
class AbstractToolset(ABC, Generic[AgentDepsT]):
    """A toolset is a collection of tools that can be used by an agent.

    It is responsible for:

    - Listing the tools it contains
    - Validating the arguments of the tools
    - Calling the tools

    See [toolset docs](../toolsets.md) for more information.
    """

    @property
    @abstractmethod
    def id(self) -> str | None:
        """An ID for the toolset that is unique among all toolsets registered with the same agent.

        If you're writing a concrete implementation that users can instantiate more than once, you should let them optionally pass a custom ID to the constructor and return that here.

        A toolset needs to have an ID in order to be used in a durable execution environment like Temporal, in which case the ID will be used to identify the toolset's activities within the workflow.
        """
        raise NotImplementedError()

    @property
    def label(self) -> str:
        """The name of the toolset for use in error messages."""
        # Append the ID (when set) so error messages can distinguish
        # multiple instances of the same toolset class.
        label = self.__class__.__name__
        if self.id:  # pragma: no branch
            label += f' {self.id!r}'
        return label

    @property
    def tool_name_conflict_hint(self) -> str:
        """A hint for how to avoid name conflicts with other toolsets for use in error messages."""
        return 'Rename the tool or wrap the toolset in a `PrefixedToolset` to avoid name conflicts.'

    async def for_run(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        """Return the toolset to use for this agent run.

        Called once per run, before `__aenter__`. Override this to return a fresh instance
        for per-run state isolation. Default: return `self` (shared across runs).
        """
        return self

    async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        """Return the toolset to use for this run step.

        Called at the start of each run step. Override this to return a modified
        instance for per-step state transitions. If returning a new instance,
        you are responsible for managing any lifecycle transitions (exiting old
        inner toolsets, entering new ones). Default: return `self` (no per-step changes).
        """
        return self

    async def __aenter__(self) -> Self:
        """Enter the toolset context.

        This is where you can set up network connections in a concrete implementation.
        """
        return self

    async def __aexit__(self, *args: Any) -> bool | None:
        """Exit the toolset context.

        This is where you can tear down network connections in a concrete implementation.

        Returning `None` (the default) means exceptions are never suppressed here.
        """
        return None

    async def get_instructions(
        self, ctx: RunContext[AgentDepsT]
    ) -> str | InstructionPart | Sequence[str | InstructionPart] | None:
        r"""Return instructions for how to use this toolset's tools.

        Override this method to provide instructions that help the agent understand
        how to use the tools in this toolset effectively.

        Simple implementations can return a plain `str`; advanced implementations can return
        [`InstructionPart`][pydantic_ai.messages.InstructionPart] objects to indicate whether
        each instruction block is static or dynamic for caching purposes.

        Args:
            ctx: The run context for this agent run.

        Returns:
            Instruction string, `InstructionPart`, list of either, or `None`.
            Plain `str` values are treated as dynamic instructions by default.
        """
        return None

    @abstractmethod
    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        """The tools that are available in this toolset, keyed by tool name."""
        raise NotImplementedError()

    @abstractmethod
    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        """Call a tool with the given arguments.

        Args:
            name: The name of the tool to call.
            tool_args: The arguments to pass to the tool.
            ctx: The run context.
            tool: The tool definition returned by [`get_tools`][pydantic_ai.toolsets.AbstractToolset.get_tools] that was called.
        """
        raise NotImplementedError()

    def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None:
        """Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling)."""
        # By default a toolset is its own leaf; composite toolsets override this to recurse.
        visitor(self)

    def visit_and_replace(
        self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]]
    ) -> AbstractToolset[AgentDepsT]:
        """Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling) and replace them in the hierarchy with the result of the function."""
        return visitor(self)

    def filtered(
        self, filter_func: Callable[[RunContext[AgentDepsT], ToolDefinition], bool]
    ) -> FilteredToolset[AgentDepsT]:
        """Returns a new toolset that filters this toolset's tools using a filter function that takes the agent context and the tool definition.

        See [toolset docs](../toolsets.md#filtering-tools) for more information.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .filtered import FilteredToolset

        return FilteredToolset(self, filter_func)

    def prefixed(self, prefix: str) -> PrefixedToolset[AgentDepsT]:
        """Returns a new toolset that prefixes the names of this toolset's tools.

        See [toolset docs](../toolsets.md#prefixing-tool-names) for more information.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .prefixed import PrefixedToolset

        return PrefixedToolset(self, prefix)

    def prepared(self, prepare_func: ToolsPrepareFunc[AgentDepsT]) -> PreparedToolset[AgentDepsT]:
        """Returns a new toolset that prepares this toolset's tools using a prepare function that takes the agent context and the original tool definitions.

        See [toolset docs](../toolsets.md#preparing-tool-definitions) for more information.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .prepared import PreparedToolset

        return PreparedToolset(self, prepare_func)

    def renamed(self, name_map: dict[str, str]) -> RenamedToolset[AgentDepsT]:
        """Returns a new toolset that renames this toolset's tools using a dictionary mapping new names to original names.

        See [toolset docs](../toolsets.md#renaming-tools) for more information.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .renamed import RenamedToolset

        return RenamedToolset(self, name_map)

    def approval_required(
        self,
        approval_required_func: Callable[[RunContext[AgentDepsT], ToolDefinition, dict[str, Any]], bool] = (
            lambda ctx, tool_def, tool_args: True
        ),
    ) -> ApprovalRequiredToolset[AgentDepsT]:
        """Returns a new toolset that requires (some) calls to tools it contains to be approved.

        By default (when no `approval_required_func` is passed), every tool call requires approval.

        See [toolset docs](../toolsets.md#requiring-tool-approval) for more information.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .approval_required import ApprovalRequiredToolset

        return ApprovalRequiredToolset(self, approval_required_func)

    def defer_loading(self, tool_names: Sequence[str] | None = None) -> DeferredLoadingToolset[AgentDepsT]:
        """Returns a new toolset that marks tools for deferred loading, hiding them until discovered via tool search.

        See [toolset docs](../toolsets.md#deferred-loading) for more information.

        Args:
            tool_names: Optional sequence of tool names to mark for deferred loading.
                If `None`, all tools are marked for deferred loading.
        """
        # Local import to avoid a circular dependency with the wrapper module.
        from .deferred_loading import DeferredLoadingToolset

        return DeferredLoadingToolset(self, tool_names=frozenset(tool_names) if tool_names is not None else None)

id abstractmethod property

id: str | None

An ID for the toolset that is unique among all toolsets registered with the same agent.

If you're writing a concrete implementation that users can instantiate more than once, you should let them optionally pass a custom ID to the constructor and return that here.

A toolset needs to have an ID in order to be used in a durable execution environment like Temporal, in which case the ID will be used to identify the toolset's activities within the workflow.

label property

label: str

The name of the toolset for use in error messages.

tool_name_conflict_hint property

tool_name_conflict_hint: str

A hint for how to avoid name conflicts with other toolsets for use in error messages.

for_run async

Return the toolset to use for this agent run.

Called once per run, before __aenter__. Override this to return a fresh instance for per-run state isolation. Default: return self (shared across runs).

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
108
109
110
111
112
113
114
async def for_run(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
    """Return the toolset to use for this agent run.

    Called once per run, before `__aenter__`. Override this to return a fresh instance
    for per-run state isolation. Default: return `self` (shared across runs).
    """
    return self

for_run_step async

for_run_step(
    ctx: RunContext[AgentDepsT],
) -> AbstractToolset[AgentDepsT]

Return the toolset to use for this run step.

Called at the start of each run step. Override this to return a modified instance for per-step state transitions. If returning a new instance, you are responsible for managing any lifecycle transitions (exiting old inner toolsets, entering new ones). Default: return self (no per-step changes).

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
116
117
118
119
120
121
122
123
124
async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
    """Return the toolset to use for this run step.

    Called at the start of each run step. Override this to return a modified
    instance for per-step state transitions. If returning a new instance,
    you are responsible for managing any lifecycle transitions (exiting old
    inner toolsets, entering new ones). Default: return `self` (no per-step changes).
    """
    return self

__aenter__ async

__aenter__() -> Self

Enter the toolset context.

This is where you can set up network connections in a concrete implementation.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
126
127
128
129
130
131
async def __aenter__(self) -> Self:
    """Enter the toolset context.

    This is where you can set up network connections in a concrete implementation.
    """
    return self

__aexit__ async

__aexit__(*args: Any) -> bool | None

Exit the toolset context.

This is where you can tear down network connections in a concrete implementation.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
133
134
135
136
137
138
async def __aexit__(self, *args: Any) -> bool | None:
    """Exit the toolset context.

    This is where you can tear down network connections in a concrete implementation.
    """
    return None

get_instructions async

get_instructions(
    ctx: RunContext[AgentDepsT],
) -> (
    str
    | InstructionPart
    | Sequence[str | InstructionPart]
    | None
)

Return instructions for how to use this toolset's tools.

Override this method to provide instructions that help the agent understand how to use the tools in this toolset effectively.

Simple implementations can return a plain str; advanced implementations can return InstructionPart objects to indicate whether each instruction block is static or dynamic for caching purposes.

Parameters:

Name Type Description Default
ctx RunContext[AgentDepsT]

The run context for this agent run.

required

Returns:

Type Description
str | InstructionPart | Sequence[str | InstructionPart] | None

Instruction string, InstructionPart, list of either, or None.

str | InstructionPart | Sequence[str | InstructionPart] | None

Plain str values are treated as dynamic instructions by default.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
async def get_instructions(
    self, ctx: RunContext[AgentDepsT]
) -> str | InstructionPart | Sequence[str | InstructionPart] | None:
    r"""Return instructions for how to use this toolset's tools.

    Override this method to provide instructions that help the agent understand
    how to use the tools in this toolset effectively.

    Simple implementations can return a plain `str`; advanced implementations can return
    [`InstructionPart`][pydantic_ai.messages.InstructionPart] objects to indicate whether
    each instruction block is static or dynamic for caching purposes.

    Args:
        ctx: The run context for this agent run.

    Returns:
        Instruction string, `InstructionPart`, list of either, or `None`.
        Plain `str` values are treated as dynamic instructions by default.
    """
    return None

get_tools abstractmethod async

get_tools(
    ctx: RunContext[AgentDepsT],
) -> dict[str, ToolsetTool[AgentDepsT]]

The tools that are available in this toolset.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
161
162
163
164
@abstractmethod
async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
    """The tools that are available in this toolset."""
    raise NotImplementedError()

call_tool abstractmethod async

call_tool(
    name: str,
    tool_args: dict[str, Any],
    ctx: RunContext[AgentDepsT],
    tool: ToolsetTool[AgentDepsT],
) -> Any

Call a tool with the given arguments.

Parameters:

Name Type Description Default
name str

The name of the tool to call.

required
tool_args dict[str, Any]

The arguments to pass to the tool.

required
ctx RunContext[AgentDepsT]

The run context.

required
tool ToolsetTool[AgentDepsT]

The tool definition returned by get_tools that was called.

required
Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
166
167
168
169
170
171
172
173
174
175
176
177
178
@abstractmethod
async def call_tool(
    self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
) -> Any:
    """Call a tool with the given arguments.

    Args:
        name: The name of the tool to call.
        tool_args: The arguments to pass to the tool.
        ctx: The run context.
        tool: The tool definition returned by [`get_tools`][pydantic_ai.toolsets.AbstractToolset.get_tools] that was called.
    """
    raise NotImplementedError()

apply

apply(
    visitor: Callable[[AbstractToolset[AgentDepsT]], None],
) -> None

Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling).

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
180
181
182
def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None:
    """Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling)."""
    visitor(self)

visit_and_replace

Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling) and replace them in the hierarchy with the result of the function.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
184
185
186
187
188
def visit_and_replace(
    self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]]
) -> AbstractToolset[AgentDepsT]:
    """Run a visitor function on all "leaf" toolsets (i.e. those that implement their own tool listing and calling) and replace them in the hierarchy with the result of the function."""
    return visitor(self)

filtered

Returns a new toolset that filters this toolset's tools using a filter function that takes the agent context and the tool definition.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
190
191
192
193
194
195
196
197
198
199
def filtered(
    self, filter_func: Callable[[RunContext[AgentDepsT], ToolDefinition], bool]
) -> FilteredToolset[AgentDepsT]:
    """Returns a new toolset that filters this toolset's tools using a filter function that takes the agent context and the tool definition.

    See [toolset docs](../toolsets.md#filtering-tools) for more information.
    """
    from .filtered import FilteredToolset

    return FilteredToolset(self, filter_func)

prefixed

prefixed(prefix: str) -> PrefixedToolset[AgentDepsT]

Returns a new toolset that prefixes the names of this toolset's tools.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
201
202
203
204
205
206
207
208
def prefixed(self, prefix: str) -> PrefixedToolset[AgentDepsT]:
    """Returns a new toolset that prefixes the names of this toolset's tools.

    See [toolset docs](../toolsets.md#prefixing-tool-names) for more information.
    """
    from .prefixed import PrefixedToolset

    return PrefixedToolset(self, prefix)

prepared

prepared(
    prepare_func: ToolsPrepareFunc[AgentDepsT],
) -> PreparedToolset[AgentDepsT]

Returns a new toolset that prepares this toolset's tools using a prepare function that takes the agent context and the original tool definitions.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
210
211
212
213
214
215
216
217
def prepared(self, prepare_func: ToolsPrepareFunc[AgentDepsT]) -> PreparedToolset[AgentDepsT]:
    """Returns a new toolset that prepares this toolset's tools using a prepare function that takes the agent context and the original tool definitions.

    See [toolset docs](../toolsets.md#preparing-tool-definitions) for more information.
    """
    from .prepared import PreparedToolset

    return PreparedToolset(self, prepare_func)

renamed

renamed(
    name_map: dict[str, str],
) -> RenamedToolset[AgentDepsT]

Returns a new toolset that renames this toolset's tools using a dictionary mapping new names to original names.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
219
220
221
222
223
224
225
226
def renamed(self, name_map: dict[str, str]) -> RenamedToolset[AgentDepsT]:
    """Returns a new toolset that renames this toolset's tools using a dictionary mapping new names to original names.

    See [toolset docs](../toolsets.md#renaming-tools) for more information.
    """
    from .renamed import RenamedToolset

    return RenamedToolset(self, name_map)

approval_required

approval_required(
    approval_required_func: Callable[
        [
            RunContext[AgentDepsT],
            ToolDefinition,
            dict[str, Any],
        ],
        bool,
    ] = lambda ctx, tool_def, tool_args: True
) -> ApprovalRequiredToolset[AgentDepsT]

Returns a new toolset that requires (some) calls to tools it contains to be approved.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
228
229
230
231
232
233
234
235
236
237
238
239
240
def approval_required(
    self,
    approval_required_func: Callable[[RunContext[AgentDepsT], ToolDefinition, dict[str, Any]], bool] = (
        lambda ctx, tool_def, tool_args: True
    ),
) -> ApprovalRequiredToolset[AgentDepsT]:
    """Returns a new toolset that requires (some) calls to tools it contains to be approved.

    See [toolset docs](../toolsets.md#requiring-tool-approval) for more information.
    """
    from .approval_required import ApprovalRequiredToolset

    return ApprovalRequiredToolset(self, approval_required_func)

defer_loading

defer_loading(
    tool_names: Sequence[str] | None = None,
) -> DeferredLoadingToolset[AgentDepsT]

Returns a new toolset that marks tools for deferred loading, hiding them until discovered via tool search.

See toolset docs for more information.

Parameters:

Name Type Description Default
tool_names Sequence[str] | None

Optional sequence of tool names to mark for deferred loading. If None, all tools are marked for deferred loading.

None
Source code in pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
242
243
244
245
246
247
248
249
250
251
252
253
def defer_loading(self, tool_names: Sequence[str] | None = None) -> DeferredLoadingToolset[AgentDepsT]:
    """Returns a new toolset that marks tools for deferred loading, hiding them until discovered via tool search.

    See [toolset docs](../toolsets.md#deferred-loading) for more information.

    Args:
        tool_names: Optional sequence of tool names to mark for deferred loading.
            If `None`, all tools are marked for deferred loading.
    """
    from .deferred_loading import DeferredLoadingToolset

    return DeferredLoadingToolset(self, tool_names=frozenset(tool_names) if tool_names is not None else None)

CombinedToolset dataclass

Bases: AbstractToolset[AgentDepsT]

A toolset that combines multiple toolsets.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/combined.py
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
@dataclass
class CombinedToolset(AbstractToolset[AgentDepsT]):
    """A toolset that combines multiple toolsets.

    See [toolset docs](../toolsets.md#combining-toolsets) for more information.
    """

    toolsets: Sequence[AbstractToolset[AgentDepsT]]

    # Holds the entered inner-toolset contexts between `__aenter__` and `__aexit__`.
    _exit_stack: AsyncExitStack | None = field(init=False, default=None)

    @property
    def id(self) -> str | None:
        return None  # pragma: no cover

    @property
    def label(self) -> str:
        return f'{self.__class__.__name__}({", ".join(toolset.label for toolset in self.toolsets)})'  # pragma: no cover

    async def for_run(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        # Ask each inner toolset for its per-run instance, preserving order.
        per_run: list[AbstractToolset[AgentDepsT]] = []
        for inner in self.toolsets:
            per_run.append(await inner.for_run(ctx))
        return replace(self, toolsets=per_run)

    async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        stepped = [await inner.for_run_step(ctx) for inner in self.toolsets]
        # Only build a new combined toolset if at least one inner toolset changed identity.
        unchanged = all(new is old for new, old in zip(stepped, self.toolsets))
        return self if unchanged else replace(self, toolsets=stepped)

    async def __aenter__(self) -> Self:
        async with AsyncExitStack() as stack:
            for inner in self.toolsets:
                await stack.enter_async_context(inner)
            # Transfer ownership of the entered contexts so they outlive this `async with`;
            # they are closed later in `__aexit__`.
            self._exit_stack = stack.pop_all()
        return self

    async def __aexit__(self, *args: Any) -> bool | None:
        if self._exit_stack is None:
            return None
        await self._exit_stack.aclose()
        self._exit_stack = None

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        # Fetch every inner toolset's tools concurrently.
        per_toolset_tools = await asyncio.gather(*(inner.get_tools(ctx) for inner in self.toolsets))
        combined: dict[str, ToolsetTool[AgentDepsT]] = {}

        for inner, tools in zip(self.toolsets, per_toolset_tools):
            for tool_name, tool in tools.items():
                owner = tool.toolset
                existing_tool = combined.get(tool_name)
                if existing_tool:
                    capitalized_toolset_label = owner.label[0].upper() + owner.label[1:]
                    raise UserError(
                        f'{capitalized_toolset_label} defines a tool whose name conflicts with existing tool from {existing_tool.toolset.label}: {tool_name!r}. {inner.tool_name_conflict_hint}'
                    )

                # Wrap each tool so `call_tool` can route back to the toolset it came from.
                combined[tool_name] = _CombinedToolsetTool(
                    toolset=owner,
                    tool_def=tool.tool_def,
                    max_retries=tool.max_retries,
                    args_validator=tool.args_validator,
                    args_validator_func=tool.args_validator_func,
                    source_toolset=inner,
                    source_tool=tool,
                )
        return combined

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        assert isinstance(tool, _CombinedToolsetTool)
        # Delegate to the toolset that originally provided this tool.
        return await tool.source_toolset.call_tool(name, tool_args, ctx, tool.source_tool)

    def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None:
        for inner in self.toolsets:
            inner.apply(visitor)

    def visit_and_replace(
        self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]]
    ) -> AbstractToolset[AgentDepsT]:
        replaced = [inner.visit_and_replace(visitor) for inner in self.toolsets]
        return replace(self, toolsets=replaced)

    async def get_instructions(self, ctx: RunContext[AgentDepsT]) -> list[str | InstructionPart] | None:
        gathered = await asyncio.gather(*(inner.get_instructions(ctx) for inner in self.toolsets))
        collected: list[str | InstructionPart] = []
        for item in gathered:
            if item is None:
                continue
            if isinstance(item, (str, InstructionPart)):
                collected.append(item)
            else:
                # A sequence of instruction parts: flatten into the combined list.
                collected.extend(item)
        return collected or None

ExternalToolset

Bases: AbstractToolset[AgentDepsT]

A toolset that holds tools whose results will be produced outside of the Pydantic AI agent run in which they were called.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/external.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
class ExternalToolset(AbstractToolset[AgentDepsT]):
    """A toolset that holds tools whose results will be produced outside of the Pydantic AI agent run in which they were called.

    See [toolset docs](../toolsets.md#external-toolset) for more information.
    """

    tool_defs: list[ToolDefinition]
    _id: str | None

    def __init__(self, tool_defs: list[ToolDefinition], *, id: str | None = None):
        self._id = id
        self.tool_defs = tool_defs

    @property
    def id(self) -> str | None:
        return self._id

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        tools: dict[str, ToolsetTool[AgentDepsT]] = {}
        for tool_def in self.tool_defs:
            # Each definition is re-tagged with kind='external'; retries are disabled
            # since the result is produced outside this run.
            tools[tool_def.name] = ToolsetTool(
                toolset=self,
                tool_def=replace(tool_def, kind='external'),
                max_retries=0,
                args_validator=TOOL_SCHEMA_VALIDATOR,
            )
        return tools

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        raise NotImplementedError('External tools cannot be called directly')

ApprovalRequiredToolset dataclass

Bases: WrapperToolset[AgentDepsT]

A toolset that requires (some) calls to tools it contains to be approved.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/approval_required.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
@dataclass
class ApprovalRequiredToolset(WrapperToolset[AgentDepsT]):
    """A toolset that requires (some) calls to tools it contains to be approved.

    See [toolset docs](../toolsets.md#requiring-tool-approval) for more information.
    """

    # Default: every tool call needs approval.
    approval_required_func: Callable[[RunContext[AgentDepsT], ToolDefinition, dict[str, Any]], bool] = (
        lambda ctx, tool_def, tool_args: True
    )

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        if not ctx.tool_call_approved:
            # The predicate is only consulted for calls that haven't been approved yet.
            if self.approval_required_func(ctx, tool.tool_def, tool_args):
                raise ApprovalRequired

        return await super().call_tool(name, tool_args, ctx, tool)

FilteredToolset dataclass

Bases: WrapperToolset[AgentDepsT]

A toolset that filters the tools it contains using a filter function that takes the agent context and the tool definition.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/filtered.py
12
13
14
15
16
17
18
19
20
21
22
23
24
@dataclass
class FilteredToolset(WrapperToolset[AgentDepsT]):
    """A toolset that filters the tools it contains using a filter function that takes the agent context and the tool definition.

    See [toolset docs](../toolsets.md#filtering-tools) for more information.
    """

    filter_func: Callable[[RunContext[AgentDepsT], ToolDefinition], bool]

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        # Keep only the wrapped toolset's tools whose definitions pass the filter.
        available = await super().get_tools(ctx)
        kept: dict[str, ToolsetTool[AgentDepsT]] = {}
        for tool_name, tool in available.items():
            if self.filter_func(ctx, tool.tool_def):
                kept[tool_name] = tool
        return kept

FunctionToolset

Bases: AbstractToolset[AgentDepsT]

A toolset that lets Python functions be used as tools.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
class FunctionToolset(AbstractToolset[AgentDepsT]):
    """A toolset that lets Python functions be used as tools.

    See [toolset docs](../toolsets.md#function-toolset) for more information.
    """

    tools: dict[str, Tool[Any]]  # registered tools, keyed by tool name
    max_retries: int  # default retry budget for tools that don't set their own
    timeout: float | None  # default per-call timeout in seconds; None means no timeout
    _id: str | None  # optional unique toolset ID (required for durable execution, e.g. Temporal)
    docstring_format: DocstringFormat
    require_parameter_descriptions: bool
    schema_generator: type[GenerateJsonSchema]
    strict: bool | None  # default JSON-schema strictness (only affects OpenAI)
    sequential: bool  # default for whether tools require serial execution
    requires_approval: bool  # default human-in-the-loop approval flag
    metadata: dict[str, Any] | None  # toolset-level metadata merged into each tool's metadata
    _defer_loading: bool  # default for hiding tools until discovered via tool search
    _instructions: list[str | SystemPromptRunner[AgentDepsT]]  # static strings and runners for get_instructions

    def __init__(
        self,
        # Immutable default: a mutable `[]` default would be shared across all calls.
        tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
        *,
        max_retries: int = 1,
        timeout: float | None = None,
        docstring_format: DocstringFormat = 'auto',
        require_parameter_descriptions: bool = False,
        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
        strict: bool | None = None,
        sequential: bool = False,
        requires_approval: bool = False,
        metadata: dict[str, Any] | None = None,
        defer_loading: bool = False,
        id: str | None = None,
        instructions: str | SystemPromptFunc[AgentDepsT] | Sequence[str | SystemPromptFunc[AgentDepsT]] | None = None,
    ):
        """Build a new function toolset.

        Args:
            tools: The tools to add to the toolset.
            max_retries: The maximum number of retries for each tool during a run.
                Applies to all tools, unless overridden when adding a tool.
            timeout: Timeout in seconds for tool execution. If a tool takes longer than this,
                a retry prompt is returned to the model. Individual tools can override this with their own timeout.
                Defaults to None (no timeout).
            docstring_format: Format of tool docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
                Applies to all tools, unless overridden when adding a tool.
            require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
                Applies to all tools, unless overridden when adding a tool.
            schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
                Applies to all tools, unless overridden when adding a tool.
            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
                Applies to all tools, unless overridden when adding a tool.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
                Applies to all tools, unless overridden when adding a tool.
            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
                Applies to all tools, unless overridden when adding a tool, which will be merged with the toolset's metadata.
            defer_loading: Whether to hide tools from the model until discovered via tool search. Defaults to False.
                See [Tool Search](../tools-advanced.md#tool-search) for more info.
                Applies to all tools, unless overridden when adding a tool.
            id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal,
                in which case the ID will be used to identify the toolset's activities within the workflow.
            instructions: Instructions for this toolset that are automatically included in the model request.
                Can be a string, a function (sync or async, with or without `RunContext`), or a sequence of these.
        """
        self.max_retries = max_retries
        self.timeout = timeout
        self._id = id
        self.docstring_format = docstring_format
        self.require_parameter_descriptions = require_parameter_descriptions
        self.schema_generator = schema_generator
        self.strict = strict
        self.sequential = sequential
        self.requires_approval = requires_approval
        self.metadata = metadata
        self._defer_loading = defer_loading

        self._instructions = []
        if instructions is not None:
            # Normalize a single string/function to a one-element sequence.
            if isinstance(instructions, str) or callable(instructions):
                instructions = [instructions]
            for instruction in instructions:
                if isinstance(instruction, str):
                    self._instructions.append(instruction)
                else:
                    # Wrap callables so they can be awaited uniformly in get_instructions.
                    self._instructions.append(SystemPromptRunner(instruction))

        self.tools = {}
        for tool in tools:
            if isinstance(tool, Tool):
                self.add_tool(tool)  # pyright: ignore[reportUnknownArgumentType]
            else:
                self.add_function(tool)

    @property
    def id(self) -> str | None:
        return self._id

    @overload
    def tool(self, func: ToolFuncContext[AgentDepsT, ToolParams], /) -> ToolFuncContext[AgentDepsT, ToolParams]: ...

    @overload
    def tool(
        self,
        /,
        *,
        name: str | None = None,
        description: str | None = None,
        retries: int | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
        docstring_format: DocstringFormat | None = None,
        require_parameter_descriptions: bool | None = None,
        schema_generator: type[GenerateJsonSchema] | None = None,
        strict: bool | None = None,
        sequential: bool | None = None,
        requires_approval: bool | None = None,
        metadata: dict[str, Any] | None = None,
        timeout: float | None = None,
        defer_loading: bool | None = None,
    ) -> Callable[[ToolFuncContext[AgentDepsT, ToolParams]], ToolFuncContext[AgentDepsT, ToolParams]]: ...

    def tool(
        self,
        func: ToolFuncContext[AgentDepsT, ToolParams] | None = None,
        /,
        *,
        name: str | None = None,
        description: str | None = None,
        retries: int | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
        docstring_format: DocstringFormat | None = None,
        require_parameter_descriptions: bool | None = None,
        schema_generator: type[GenerateJsonSchema] | None = None,
        strict: bool | None = None,
        sequential: bool | None = None,
        requires_approval: bool | None = None,
        metadata: dict[str, Any] | None = None,
        timeout: float | None = None,
        defer_loading: bool | None = None,
    ) -> Any:
        """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

        Can decorate a sync or async functions.

        The docstring is inspected to extract both the tool description and description of each parameter,
        [learn more](../tools.md#function-tools-and-schema).

        We can't add overloads for every possible signature of tool, since the return type is a recursive union
        so the signature of functions decorated with `@toolset.tool` is obscured.

        Example:
        ```python
        from pydantic_ai import Agent, FunctionToolset, RunContext

        toolset = FunctionToolset()

        @toolset.tool
        def foobar(ctx: RunContext[int], x: int) -> int:
            return ctx.deps + x

        @toolset.tool(retries=2)
        async def spam(ctx: RunContext[str], y: float) -> float:
            return ctx.deps + y

        agent = Agent('test', toolsets=[toolset], deps_type=int)
        result = agent.run_sync('foobar', deps=1)
        print(result.output)
        #> {"foobar":1,"spam":1.0}
        ```

        Args:
            func: The tool function to register.
            name: The name of the tool, defaults to the function name.
            description: The description of the tool, defaults to the function docstring.
            retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                which defaults to 1.
            prepare: custom method to prepare the tool definition for each step, return `None` to omit this
                tool from a given step. This is useful if you want to customise a tool at call time,
                or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
            args_validator: custom method to validate tool arguments after schema validation has passed,
                before execution. The validator receives the already-validated and type-converted parameters,
                with `RunContext` as the first argument.
                Should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on validation failure,
                return `None` on success.
                See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
            docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                If `None`, the default value is determined by the toolset.
            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
                If `None`, the default value is determined by the toolset.
            schema_generator: The JSON schema generator class to use for this tool.
                If `None`, the default value is determined by the toolset.
            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
                If `None`, the default value is determined by the toolset.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
                If `None`, the default value is determined by the toolset.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
                If `None`, the default value is determined by the toolset.
            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
                If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.
            timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model.
                Defaults to None (no timeout).
            defer_loading: Whether to hide this tool until it's discovered via tool search.
                See [Tool Search](../tools-advanced.md#tool-search) for more info.
                If `None`, the default value is determined by the toolset.
        """

        def tool_decorator(
            func_: ToolFuncContext[AgentDepsT, ToolParams],
        ) -> ToolFuncContext[AgentDepsT, ToolParams]:
            # TODO(v2): Remove this deprecation fallback
            #  and let takes_ctx=True propagate, which will raise a runtime error
            #  in function_schema if the function doesn't accept RunContext.

            # Is the func actually taking RunContext or is it a plain function in disguise?

            tool = self.add_function(
                func=func_,
                takes_ctx=None,
                name=name,
                description=description,
                retries=retries,
                prepare=prepare,
                args_validator=args_validator,
                docstring_format=docstring_format,
                require_parameter_descriptions=require_parameter_descriptions,
                schema_generator=schema_generator,
                strict=strict,
                sequential=sequential,
                requires_approval=requires_approval,
                metadata=metadata,
                timeout=timeout,
                defer_loading=defer_loading,
            )
            if not tool.function_schema.takes_ctx:
                warnings.warn(
                    'Passing a function without `RunContext` to `FunctionToolset.tool()` is deprecated, use `tool_plain()` instead.',
                    DeprecationWarning,
                    stacklevel=2,
                )

            return func_

        return tool_decorator if func is None else tool_decorator(func)

    @overload
    def tool_plain(self, func: ToolFuncPlain[ToolParams], /) -> ToolFuncPlain[ToolParams]: ...

    @overload
    def tool_plain(
        self,
        /,
        *,
        name: str | None = None,
        description: str | None = None,
        retries: int | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
        docstring_format: DocstringFormat | None = None,
        require_parameter_descriptions: bool | None = None,
        schema_generator: type[GenerateJsonSchema] | None = None,
        strict: bool | None = None,
        sequential: bool | None = None,
        requires_approval: bool | None = None,
        metadata: dict[str, Any] | None = None,
        timeout: float | None = None,
        defer_loading: bool | None = None,
    ) -> Callable[[ToolFuncPlain[ToolParams]], ToolFuncPlain[ToolParams]]: ...

    def tool_plain(
        self,
        func: ToolFuncPlain[ToolParams] | None = None,
        /,
        *,
        name: str | None = None,
        description: str | None = None,
        retries: int | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
        docstring_format: DocstringFormat | None = None,
        require_parameter_descriptions: bool | None = None,
        schema_generator: type[GenerateJsonSchema] | None = None,
        strict: bool | None = None,
        sequential: bool | None = None,
        requires_approval: bool | None = None,
        metadata: dict[str, Any] | None = None,
        timeout: float | None = None,
        defer_loading: bool | None = None,
    ) -> Any:
        """Decorator to register a tool function which DOES NOT take `RunContext` as an argument.

        Can decorate a sync or async functions.

        The docstring is inspected to extract both the tool description and description of each parameter,
        [learn more](../tools.md#function-tools-and-schema).

        We can't add overloads for every possible signature of tool, since the return type is a recursive union
        so the signature of functions decorated with `@toolset.tool_plain` is obscured.

        Example:
        ```python
        from pydantic_ai import Agent, FunctionToolset

        toolset = FunctionToolset()

        @toolset.tool_plain
        def foobar(x: int) -> int:
            return x + 1

        @toolset.tool_plain(retries=2)
        async def spam(y: float) -> float:
            return y * 2.0

        agent = Agent('test', toolsets=[toolset])
        result = agent.run_sync('foobar')
        print(result.output)
        #> {"foobar":1,"spam":0.0}
        ```

        Args:
            func: The tool function to register.
            name: The name of the tool, defaults to the function name.
            description: The description of the tool, defaults to the function docstring.
            retries: The number of retries to allow for this tool, defaults to the toolset's default retries,
                which defaults to 1.
            prepare: custom method to prepare the tool definition for each step, return `None` to omit this
                tool from a given step. This is useful if you want to customise a tool at call time,
                or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
            args_validator: custom method to validate tool arguments after schema validation has passed,
                before execution. The validator receives the already-validated and type-converted parameters,
                with [`RunContext`][pydantic_ai.tools.RunContext] as the first argument — even though the
                tool function itself does not take `RunContext` when using `tool_plain`.
                Should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on validation failure,
                return `None` on success.
                See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
            docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                If `None`, the default value is determined by the toolset.
            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
                If `None`, the default value is determined by the toolset.
            schema_generator: The JSON schema generator class to use for this tool.
                If `None`, the default value is determined by the toolset.
            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
                If `None`, the default value is determined by the toolset.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
                If `None`, the default value is determined by the toolset.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
                If `None`, the default value is determined by the toolset.
            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
                If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.
            timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model.
                Defaults to None (no timeout).
            defer_loading: Whether to hide this tool until it's discovered via tool search.
                See [Tool Search](../tools-advanced.md#tool-search) for more info.
                If `None`, the default value is determined by the toolset.
        """

        def tool_decorator(
            func_: ToolFuncPlain[ToolParams],
        ) -> ToolFuncPlain[ToolParams]:
            # noinspection PyTypeChecker
            self.add_function(
                func=func_,
                takes_ctx=False,
                name=name,
                description=description,
                retries=retries,
                prepare=prepare,
                args_validator=args_validator,
                docstring_format=docstring_format,
                require_parameter_descriptions=require_parameter_descriptions,
                schema_generator=schema_generator,
                strict=strict,
                sequential=sequential,
                requires_approval=requires_approval,
                metadata=metadata,
                timeout=timeout,
                defer_loading=defer_loading,
            )
            return func_

        return tool_decorator if func is None else tool_decorator(func)

    def instructions(
        self,
        func: SystemPromptFunc[AgentDepsT],
        /,
    ) -> SystemPromptFunc[AgentDepsT]:
        """Decorator to register an instructions function for this toolset.

        The function can be sync or async, and can optionally take a
        [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

        Example:
        ```python
        from pydantic_ai import FunctionToolset, RunContext

        toolset = FunctionToolset[int]()

        @toolset.instructions
        def my_instructions(ctx: RunContext[int]) -> str:
            return 'Always use the search tool when looking for information.'

        @toolset.tool
        def search(ctx: RunContext[int], query: str) -> str:
            return f'Results for: {query}'
        ```

        Args:
            func: The instructions function to register.
        """
        self._instructions.append(SystemPromptRunner(func))
        return func

    def add_function(
        self,
        func: ToolFuncEither[AgentDepsT, ToolParams],
        takes_ctx: bool | None = None,
        name: str | None = None,
        description: str | None = None,
        retries: int | None = None,
        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
        args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
        docstring_format: DocstringFormat | None = None,
        require_parameter_descriptions: bool | None = None,
        schema_generator: type[GenerateJsonSchema] | None = None,
        strict: bool | None = None,
        sequential: bool | None = None,
        requires_approval: bool | None = None,
        defer_loading: bool | None = None,
        metadata: dict[str, Any] | None = None,
        timeout: float | None = None,
    ) -> Tool[AgentDepsT]:
        """Add a function as a tool to the toolset.

        Can take a sync or async function.

        The docstring is inspected to extract both the tool description and description of each parameter,
        [learn more](../tools.md#function-tools-and-schema).

        Args:
            func: The tool function to register.
            takes_ctx: Whether the function takes a [`RunContext`][pydantic_ai.tools.RunContext] as its first argument. If `None`, this is inferred from the function signature.
            name: The name of the tool, defaults to the function name.
            description: The description of the tool, defaults to the function docstring.
            retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                which defaults to 1.
            prepare: custom method to prepare the tool definition for each step, return `None` to omit this
                tool from a given step. This is useful if you want to customise a tool at call time,
                or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
            args_validator: custom method to validate tool arguments after schema validation has passed,
                before execution. The validator receives the already-validated and type-converted parameters,
                with `RunContext` as the first argument.
                Should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on validation failure,
                return `None` on success.
                See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
            docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                If `None`, the default value is determined by the toolset.
            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
                If `None`, the default value is determined by the toolset.
            schema_generator: The JSON schema generator class to use for this tool.
                If `None`, the default value is determined by the toolset.
            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
                If `None`, the default value is determined by the toolset.
            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
                If `None`, the default value is determined by the toolset.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
                If `None`, the default value is determined by the toolset.
            defer_loading: Whether to hide this tool until it's discovered via tool search.
                See [Tool Search](../tools-advanced.md#tool-search) for more info.
                If `None`, the default value is determined by the toolset.
            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
                If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.
            timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model.
                Defaults to None (no timeout).
        """
        # Fall back to the toolset-level defaults for any option the caller left as None.
        if docstring_format is None:
            docstring_format = self.docstring_format
        if require_parameter_descriptions is None:
            require_parameter_descriptions = self.require_parameter_descriptions
        if schema_generator is None:
            schema_generator = self.schema_generator
        if strict is None:
            strict = self.strict
        if sequential is None:
            sequential = self.sequential
        if requires_approval is None:
            requires_approval = self.requires_approval
        if defer_loading is None:
            defer_loading = self._defer_loading

        tool = Tool[AgentDepsT](
            func,
            takes_ctx=takes_ctx,
            name=name,
            description=description,
            max_retries=retries,
            prepare=prepare,
            args_validator=args_validator,
            docstring_format=docstring_format,
            require_parameter_descriptions=require_parameter_descriptions,
            schema_generator=schema_generator,
            strict=strict,
            sequential=sequential,
            requires_approval=requires_approval,
            metadata=metadata,
            timeout=timeout,
            defer_loading=defer_loading,
        )
        self.add_tool(tool)
        return tool

    def add_tool(self, tool: Tool[AgentDepsT]) -> None:
        """Add a tool to the toolset.

        Args:
            tool: The tool to add.

        Raises:
            UserError: If a tool with the same name is already registered.
        """
        if tool.name in self.tools:
            raise UserError(f'Tool name conflicts with existing tool: {tool.name!r}')
        if tool.max_retries is None:
            tool.max_retries = self.max_retries
        if self.metadata is not None:
            # Merge toolset metadata into the tool's; tool-level keys win on conflict.
            tool.metadata = self.metadata | (tool.metadata or {})
        self.tools[tool.name] = tool

    async def get_instructions(self, ctx: RunContext[AgentDepsT]) -> list[InstructionPart] | None:
        """Build the instruction parts for this toolset, evaluating any registered functions.

        Returns `None` when there are no instructions (or they all resolve to empty/whitespace).
        """
        if not self._instructions:
            return None
        parts: list[InstructionPart] = []
        for func in self._instructions:
            if isinstance(func, str):
                # Static strings are non-dynamic; skip blank ones.
                if func.strip():
                    parts.append(InstructionPart(content=func, dynamic=False))
            else:
                result = await func.run(ctx)
                if result and result.strip():
                    parts.append(InstructionPart(content=result, dynamic=True))
        return parts or None

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        """Prepare each registered tool's definition for this run and return the resulting toolset tools.

        Tools whose `prepare` returns a falsy definition are omitted for this step.

        Raises:
            UserError: If a tool's prepared name collides with another tool's name.
        """
        tools: dict[str, ToolsetTool[AgentDepsT]] = {}
        for original_name, tool in self.tools.items():
            max_retries = tool.max_retries if tool.max_retries is not None else self.max_retries
            # Give the prepare hook a per-tool context with the right retry counters.
            run_context = replace(
                ctx,
                tool_name=original_name,
                retry=ctx.retries.get(original_name, 0),
                max_retries=max_retries,
            )
            tool_def = await tool.prepare_tool_def(run_context)
            if not tool_def:
                continue

            # `prepare` may rename the tool; guard against collisions either way.
            new_name = tool_def.name
            if new_name in tools:
                if new_name != original_name:
                    raise UserError(f'Renaming tool {original_name!r} to {new_name!r} conflicts with existing tool.')
                else:
                    raise UserError(f'Tool name conflicts with previously renamed tool: {new_name!r}.')

            tools[new_name] = FunctionToolsetTool(
                toolset=self,
                tool_def=tool_def,
                max_retries=max_retries,
                args_validator=tool.function_schema.validator,
                args_validator_func=tool.args_validator,
                call_func=tool.function_schema.call,
                is_async=tool.function_schema.is_async,
                timeout=tool_def.timeout,
            )
        return tools

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        """Execute a tool call, enforcing the effective timeout.

        Raises:
            ModelRetry: If the call exceeds the effective timeout, so the model is prompted to retry.
        """
        assert isinstance(tool, FunctionToolsetTool)

        # Per-tool timeout takes precedence over toolset timeout
        timeout = tool.timeout if tool.timeout is not None else self.timeout
        if timeout is not None:
            try:
                with anyio.fail_after(timeout):
                    return await tool.call_func(tool_args, ctx)
            except TimeoutError:
                # `from None` drops the cancellation traceback noise.
                raise ModelRetry(f'Timed out after {timeout} seconds.') from None
        else:
            return await tool.call_func(tool_args, ctx)

__init__

__init__(
    tools: Sequence[
        Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]
    ] = [],
    *,
    max_retries: int = 1,
    timeout: float | None = None,
    docstring_format: DocstringFormat = "auto",
    require_parameter_descriptions: bool = False,
    schema_generator: type[
        GenerateJsonSchema
    ] = GenerateToolJsonSchema,
    strict: bool | None = None,
    sequential: bool = False,
    requires_approval: bool = False,
    metadata: dict[str, Any] | None = None,
    defer_loading: bool = False,
    id: str | None = None,
    instructions: (
        str
        | SystemPromptFunc[AgentDepsT]
        | Sequence[str | SystemPromptFunc[AgentDepsT]]
        | None
    ) = None
)

Build a new function toolset.

Parameters:

Name Type Description Default
tools Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]]

The tools to add to the toolset.

[]
max_retries int

The maximum number of retries for each tool during a run. Applies to all tools, unless overridden when adding a tool.

1
timeout float | None

Timeout in seconds for tool execution. If a tool takes longer than this, a retry prompt is returned to the model. Individual tools can override this with their own timeout. Defaults to None (no timeout).

None
docstring_format DocstringFormat

Format of tool docstring, see DocstringFormat. Defaults to 'auto', such that the format is inferred from the structure of the docstring. Applies to all tools, unless overridden when adding a tool.

'auto'
require_parameter_descriptions bool

If True, raise an error if a parameter description is missing. Defaults to False. Applies to all tools, unless overridden when adding a tool.

False
schema_generator type[GenerateJsonSchema]

The JSON schema generator class to use for this tool. Defaults to GenerateToolJsonSchema. Applies to all tools, unless overridden when adding a tool.

GenerateToolJsonSchema
strict bool | None

Whether to enforce JSON schema compliance (only affects OpenAI). See ToolDefinition for more info.

None
sequential bool

Whether the function requires a sequential/serial execution environment. Defaults to False. Applies to all tools, unless overridden when adding a tool.

False
requires_approval bool

Whether this tool requires human-in-the-loop approval. Defaults to False. See the tools documentation for more info. Applies to all tools, unless overridden when adding a tool.

False
metadata dict[str, Any] | None

Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. Applies to all tools, unless overridden when adding a tool, which will be merged with the toolset's metadata.

None
defer_loading bool

Whether to hide tools from the model until discovered via tool search. Defaults to False. See Tool Search for more info. Applies to all tools, unless overridden when adding a tool.

False
id str | None

An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal, in which case the ID will be used to identify the toolset's activities within the workflow.

None
instructions str | SystemPromptFunc[AgentDepsT] | Sequence[str | SystemPromptFunc[AgentDepsT]] | None

Instructions for this toolset that are automatically included in the model request. Can be a string, a function (sync or async, with or without RunContext), or a sequence of these.

None
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def __init__(
    self,
    tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
    *,
    max_retries: int = 1,
    timeout: float | None = None,
    docstring_format: DocstringFormat = 'auto',
    require_parameter_descriptions: bool = False,
    schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
    strict: bool | None = None,
    sequential: bool = False,
    requires_approval: bool = False,
    metadata: dict[str, Any] | None = None,
    defer_loading: bool = False,
    id: str | None = None,
    instructions: str | SystemPromptFunc[AgentDepsT] | Sequence[str | SystemPromptFunc[AgentDepsT]] | None = None,
):
    """Build a new function toolset.

    Args:
        tools: The tools to add to the toolset. Defaults to an empty sequence.
        max_retries: The maximum number of retries for each tool during a run.
            Applies to all tools, unless overridden when adding a tool.
        timeout: Timeout in seconds for tool execution. If a tool takes longer than this,
            a retry prompt is returned to the model. Individual tools can override this with their own timeout.
            Defaults to None (no timeout).
        docstring_format: Format of tool docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
            Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
            Applies to all tools, unless overridden when adding a tool.
        require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
            Applies to all tools, unless overridden when adding a tool.
        schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
            Applies to all tools, unless overridden when adding a tool.
        strict: Whether to enforce JSON schema compliance (only affects OpenAI).
            See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
        sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
            Applies to all tools, unless overridden when adding a tool.
        requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
            See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
            Applies to all tools, unless overridden when adding a tool.
        metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
            Applies to all tools, unless overridden when adding a tool, which will be merged with the toolset's metadata.
        defer_loading: Whether to hide tools from the model until discovered via tool search. Defaults to False.
            See [Tool Search](../tools-advanced.md#tool-search) for more info.
            Applies to all tools, unless overridden when adding a tool.
        id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal,
            in which case the ID will be used to identify the toolset's activities within the workflow.
        instructions: Instructions for this toolset that are automatically included in the model request.
            Can be a string, a function (sync or async, with or without `RunContext`), or a sequence of these.
    """
    # NOTE: `tools` defaults to an immutable empty tuple rather than `[]` to avoid the
    # shared-mutable-default-argument pitfall (ruff B006); it is only iterated below,
    # so any Sequence works the same.

    # Per-toolset defaults; tools added later may override these individually.
    self.max_retries = max_retries
    self.timeout = timeout
    self._id = id
    self.docstring_format = docstring_format
    self.require_parameter_descriptions = require_parameter_descriptions
    self.schema_generator = schema_generator
    self.strict = strict
    self.sequential = sequential
    self.requires_approval = requires_approval
    self.metadata = metadata
    self._defer_loading = defer_loading

    # Normalize `instructions` to a flat list: a single string or callable is first
    # wrapped in a list; plain strings are stored as-is and callables are wrapped
    # in `SystemPromptRunner`.
    self._instructions: list[str | SystemPromptRunner[AgentDepsT]] = []
    if instructions is not None:
        if isinstance(instructions, str) or callable(instructions):
            instructions = [instructions]
        for instruction in instructions:
            if isinstance(instruction, str):
                self._instructions.append(instruction)
            else:
                self._instructions.append(SystemPromptRunner(instruction))

    # Register the initial tools: prepared `Tool` instances are added directly,
    # bare functions go through `add_function`.
    self.tools = {}
    for tool in tools:
        if isinstance(tool, Tool):
            self.add_tool(tool)  # pyright: ignore[reportUnknownArgumentType]
        else:
            self.add_function(tool)

tool

tool(
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: (
        ArgsValidatorFunc[AgentDepsT, ToolParams] | None
    ) = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: (
        type[GenerateJsonSchema] | None
    ) = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None
) -> Callable[
    [ToolFuncContext[AgentDepsT, ToolParams]],
    ToolFuncContext[AgentDepsT, ToolParams],
]
tool(
    func: (
        ToolFuncContext[AgentDepsT, ToolParams] | None
    ) = None,
    /,
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: (
        ArgsValidatorFunc[AgentDepsT, ToolParams] | None
    ) = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: (
        type[GenerateJsonSchema] | None
    ) = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None,
) -> Any

Decorator to register a tool function which takes RunContext as its first argument.

Can decorate a sync or async function.

The docstring is inspected to extract both the tool description and description of each parameter, learn more.

We can't add overloads for every possible signature of tool, since the return type is a recursive union so the signature of functions decorated with @toolset.tool is obscured.

Example:

from pydantic_ai import Agent, FunctionToolset, RunContext

toolset = FunctionToolset()

@toolset.tool
def foobar(ctx: RunContext[int], x: int) -> int:
    return ctx.deps + x

@toolset.tool(retries=2)
async def spam(ctx: RunContext[str], y: float) -> float:
    return ctx.deps + y

agent = Agent('test', toolsets=[toolset], deps_type=int)
result = agent.run_sync('foobar', deps=1)
print(result.output)
#> {"foobar":1,"spam":1.0}

Parameters:

Name Type Description Default
func ToolFuncContext[AgentDepsT, ToolParams] | None

The tool function to register.

None
name str | None

The name of the tool, defaults to the function name.

None
description str | None

The description of the tool, defaults to the function docstring.

None
retries int | None

The number of retries to allow for this tool, defaults to the agent's default retries, which defaults to 1.

None
prepare ToolPrepareFunc[AgentDepsT] | None

custom method to prepare the tool definition for each step, return None to omit this tool from a given step. This is useful if you want to customise a tool at call time, or omit it completely from a step. See ToolPrepareFunc.

None
args_validator ArgsValidatorFunc[AgentDepsT, ToolParams] | None

custom method to validate tool arguments after schema validation has passed, before execution. The validator receives the already-validated and type-converted parameters, with RunContext as the first argument. Should raise ModelRetry on validation failure, return None on success. See ArgsValidatorFunc.

None
docstring_format DocstringFormat | None

The format of the docstring, see DocstringFormat. If None, the default value is determined by the toolset.

None
require_parameter_descriptions bool | None

If True, raise an error if a parameter description is missing. If None, the default value is determined by the toolset.

None
schema_generator type[GenerateJsonSchema] | None

The JSON schema generator class to use for this tool. If None, the default value is determined by the toolset.

None
strict bool | None

Whether to enforce JSON schema compliance (only affects OpenAI). See ToolDefinition for more info. If None, the default value is determined by the toolset.

None
sequential bool | None

Whether the function requires a sequential/serial execution environment. Defaults to False. If None, the default value is determined by the toolset.

None
requires_approval bool | None

Whether this tool requires human-in-the-loop approval. Defaults to False. See the tools documentation for more info. If None, the default value is determined by the toolset.

None
metadata dict[str, Any] | None

Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. If None, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.

None
timeout float | None

Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. Defaults to None (no timeout).

None
defer_loading bool | None

Whether to hide this tool until it's discovered via tool search. See Tool Search for more info. If None, the default value is determined by the toolset.

None
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
def tool(
    self,
    func: ToolFuncContext[AgentDepsT, ToolParams] | None = None,
    /,
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: type[GenerateJsonSchema] | None = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None,
) -> Any:
    """Decorator to register a tool function that takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

    Works on both sync and async functions.

    The tool description and the description of each parameter are extracted from the
    function's docstring, [learn more](../tools.md#function-tools-and-schema).

    The return type is `Any` because the real return type is a recursive union, so adding
    overloads for every possible signature of tool would still leave functions decorated
    with `@toolset.tool` with an obscured signature.

    Example:
    ```python
    from pydantic_ai import Agent, FunctionToolset, RunContext

    toolset = FunctionToolset()

    @toolset.tool
    def foobar(ctx: RunContext[int], x: int) -> int:
        return ctx.deps + x

    @toolset.tool(retries=2)
    async def spam(ctx: RunContext[str], y: float) -> float:
        return ctx.deps + y

    agent = Agent('test', toolsets=[toolset], deps_type=int)
    result = agent.run_sync('foobar', deps=1)
    print(result.output)
    #> {"foobar":1,"spam":1.0}
    ```

    Args:
        func: The tool function to register. When `None`, a configured decorator is
            returned instead (i.e. the `@toolset.tool(...)` usage).
        name: The name of the tool; defaults to the function name.
        description: The description of the tool; defaults to the function docstring.
        retries: The number of retries to allow for this tool; defaults to the agent's
            default retries, which defaults to 1.
        prepare: Custom method to prepare the tool definition for each step; return `None`
            to omit this tool from a given step. Useful to customise a tool at call time
            or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
        args_validator: Custom method to validate tool arguments after schema validation
            has passed, before execution. The validator receives the already-validated and
            type-converted parameters, with `RunContext` as the first argument.
            It should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on validation
            failure and return `None` on success.
            See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
        docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
            If `None`, the toolset's default is used.
        require_parameter_descriptions: If True, raise an error if a parameter description
            is missing. If `None`, the toolset's default is used.
        schema_generator: The JSON schema generator class to use for this tool.
            If `None`, the toolset's default is used.
        strict: Whether to enforce JSON schema compliance (only affects OpenAI).
            See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
            If `None`, the toolset's default is used.
        sequential: Whether the function requires a sequential/serial execution environment.
            If `None`, the toolset's default is used.
        requires_approval: Whether this tool requires human-in-the-loop approval.
            See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
            If `None`, the toolset's default is used.
        metadata: Optional metadata for the tool. This is not sent to the model but can be
            used for filtering and tool behavior customization. If provided, it is merged
            with the toolset's metadata; if `None`, the toolset's default is used.
        timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry
            prompt is returned to the model. Defaults to None (no timeout).
        defer_loading: Whether to hide this tool until it's discovered via tool search.
            See [Tool Search](../tools-advanced.md#tool-search) for more info.
            If `None`, the toolset's default is used.
    """

    def register(
        func_: ToolFuncContext[AgentDepsT, ToolParams],
    ) -> ToolFuncContext[AgentDepsT, ToolParams]:
        # TODO(v2): Remove this deprecation fallback
        #  and let takes_ctx=True propagate, which will raise a runtime error
        #  in function_schema if the function doesn't accept RunContext.

        # takes_ctx=None lets schema inspection decide whether the function truly
        # takes a RunContext or is a plain function registered via the wrong decorator.
        registered = self.add_function(
            func=func_,
            takes_ctx=None,
            name=name,
            description=description,
            retries=retries,
            prepare=prepare,
            args_validator=args_validator,
            docstring_format=docstring_format,
            require_parameter_descriptions=require_parameter_descriptions,
            schema_generator=schema_generator,
            strict=strict,
            sequential=sequential,
            requires_approval=requires_approval,
            metadata=metadata,
            timeout=timeout,
            defer_loading=defer_loading,
        )
        if not registered.function_schema.takes_ctx:
            warnings.warn(
                'Passing a function without `RunContext` to `FunctionToolset.tool()` is deprecated, use `tool_plain()` instead.',
                DeprecationWarning,
                stacklevel=2,
            )

        return func_

    if func is None:
        # Called with arguments, e.g. `@toolset.tool(retries=2)`: return the decorator.
        return register
    # Called bare, e.g. `@toolset.tool`: register immediately.
    return register(func)

tool_plain

tool_plain(
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: (
        ArgsValidatorFunc[AgentDepsT, ToolParams] | None
    ) = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: (
        type[GenerateJsonSchema] | None
    ) = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None
) -> Callable[
    [ToolFuncPlain[ToolParams]], ToolFuncPlain[ToolParams]
]
tool_plain(
    func: ToolFuncPlain[ToolParams] | None = None,
    /,
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: (
        ArgsValidatorFunc[AgentDepsT, ToolParams] | None
    ) = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: (
        type[GenerateJsonSchema] | None
    ) = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None,
) -> Any

Decorator to register a tool function which DOES NOT take RunContext as an argument.

Can decorate a sync or async function.

The docstring is inspected to extract both the tool description and description of each parameter, learn more.

We can't add overloads for every possible signature of tool, since the return type is a recursive union so the signature of functions decorated with @toolset.tool_plain is obscured.

Example:

from pydantic_ai import Agent, FunctionToolset

toolset = FunctionToolset()

@toolset.tool_plain
def foobar(x: int) -> int:
    return x + 1

@toolset.tool_plain(retries=2)
async def spam(y: float) -> float:
    return y * 2.0

agent = Agent('test', toolsets=[toolset])
result = agent.run_sync('foobar')
print(result.output)
#> {"foobar":1,"spam":0.0}

Parameters:

Name Type Description Default
func ToolFuncPlain[ToolParams] | None

The tool function to register.

None
name str | None

The name of the tool, defaults to the function name.

None
description str | None

The description of the tool, defaults to the function docstring.

None
retries int | None

The number of retries to allow for this tool, defaults to the toolset's default retries, which defaults to 1.

None
prepare ToolPrepareFunc[AgentDepsT] | None

custom method to prepare the tool definition for each step, return None to omit this tool from a given step. This is useful if you want to customise a tool at call time, or omit it completely from a step. See ToolPrepareFunc.

None
args_validator ArgsValidatorFunc[AgentDepsT, ToolParams] | None

custom method to validate tool arguments after schema validation has passed, before execution. The validator receives the already-validated and type-converted parameters, with RunContext as the first argument — even though the tool function itself does not take RunContext when using tool_plain. Should raise ModelRetry on validation failure, return None on success. See ArgsValidatorFunc.

None
docstring_format DocstringFormat | None

The format of the docstring, see DocstringFormat. If None, the default value is determined by the toolset.

None
require_parameter_descriptions bool | None

If True, raise an error if a parameter description is missing. If None, the default value is determined by the toolset.

None
schema_generator type[GenerateJsonSchema] | None

The JSON schema generator class to use for this tool. If None, the default value is determined by the toolset.

None
strict bool | None

Whether to enforce JSON schema compliance (only affects OpenAI). See ToolDefinition for more info. If None, the default value is determined by the toolset.

None
sequential bool | None

Whether the function requires a sequential/serial execution environment. Defaults to False. If None, the default value is determined by the toolset.

None
requires_approval bool | None

Whether this tool requires human-in-the-loop approval. Defaults to False. See the tools documentation for more info. If None, the default value is determined by the toolset.

None
metadata dict[str, Any] | None

Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. If None, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.

None
timeout float | None

Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. Defaults to None (no timeout).

None
defer_loading bool | None

Whether to hide this tool until it's discovered via tool search. See Tool Search for more info. If None, the default value is determined by the toolset.

None
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
def tool_plain(
    self,
    func: ToolFuncPlain[ToolParams] | None = None,
    /,
    *,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: type[GenerateJsonSchema] | None = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
    defer_loading: bool | None = None,
) -> Any:
    """Decorator to register a tool function which DOES NOT take `RunContext` as an argument.

    Works on both sync and async functions.

    The tool description and the description of each parameter are extracted from the
    function's docstring, [learn more](../tools.md#function-tools-and-schema).

    The return type is `Any` because the real return type is a recursive union, so adding
    overloads for every possible signature of tool would still leave functions decorated
    with `@toolset.tool_plain` with an obscured signature.

    Example:
    ```python
    from pydantic_ai import Agent, FunctionToolset

    toolset = FunctionToolset()

    @toolset.tool_plain
    def foobar(x: int) -> int:
        return x + 1

    @toolset.tool_plain(retries=2)
    async def spam(y: float) -> float:
        return y * 2.0

    agent = Agent('test', toolsets=[toolset])
    result = agent.run_sync('foobar')
    print(result.output)
    #> {"foobar":1,"spam":0.0}
    ```

    Args:
        func: The tool function to register. When `None`, a configured decorator is
            returned instead (i.e. the `@toolset.tool_plain(...)` usage).
        name: The name of the tool; defaults to the function name.
        description: The description of the tool; defaults to the function docstring.
        retries: The number of retries to allow for this tool; defaults to the toolset's
            default retries, which defaults to 1.
        prepare: Custom method to prepare the tool definition for each step; return `None`
            to omit this tool from a given step. Useful to customise a tool at call time
            or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
        args_validator: Custom method to validate tool arguments after schema validation
            has passed, before execution. The validator receives the already-validated and
            type-converted parameters, with [`RunContext`][pydantic_ai.tools.RunContext]
            as the first argument — even though the tool function itself does not take
            `RunContext` when using `tool_plain`.
            It should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on validation
            failure and return `None` on success.
            See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
        docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
            If `None`, the toolset's default is used.
        require_parameter_descriptions: If True, raise an error if a parameter description
            is missing. If `None`, the toolset's default is used.
        schema_generator: The JSON schema generator class to use for this tool.
            If `None`, the toolset's default is used.
        strict: Whether to enforce JSON schema compliance (only affects OpenAI).
            See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
            If `None`, the toolset's default is used.
        sequential: Whether the function requires a sequential/serial execution environment.
            If `None`, the toolset's default is used.
        requires_approval: Whether this tool requires human-in-the-loop approval.
            See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
            If `None`, the toolset's default is used.
        metadata: Optional metadata for the tool. This is not sent to the model but can be
            used for filtering and tool behavior customization. If provided, it is merged
            with the toolset's metadata; if `None`, the toolset's default is used.
        timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry
            prompt is returned to the model. Defaults to None (no timeout).
        defer_loading: Whether to hide this tool until it's discovered via tool search.
            See [Tool Search](../tools-advanced.md#tool-search) for more info.
            If `None`, the toolset's default is used.
    """

    def register(
        func_: ToolFuncPlain[ToolParams],
    ) -> ToolFuncPlain[ToolParams]:
        # takes_ctx=False: the function is registered as a plain tool with no RunContext.
        # noinspection PyTypeChecker
        self.add_function(
            func=func_,
            takes_ctx=False,
            name=name,
            description=description,
            retries=retries,
            prepare=prepare,
            args_validator=args_validator,
            docstring_format=docstring_format,
            require_parameter_descriptions=require_parameter_descriptions,
            schema_generator=schema_generator,
            strict=strict,
            sequential=sequential,
            requires_approval=requires_approval,
            metadata=metadata,
            timeout=timeout,
            defer_loading=defer_loading,
        )
        return func_

    if func is None:
        # Called with arguments, e.g. `@toolset.tool_plain(retries=2)`: return the decorator.
        return register
    # Called bare, e.g. `@toolset.tool_plain`: register immediately.
    return register(func)

instructions

Decorator to register an instructions function for this toolset.

The function can be sync or async, and can optionally take a RunContext as its first argument.

Example:

from pydantic_ai import FunctionToolset, RunContext

toolset = FunctionToolset[int]()

@toolset.instructions
def my_instructions(ctx: RunContext[int]) -> str:
    return 'Always use the search tool when looking for information.'

@toolset.tool
def search(ctx: RunContext[int], query: str) -> str:
    return f'Results for: {query}'

Parameters:

Name Type Description Default
func SystemPromptFunc[AgentDepsT]

The instructions function to register.

required
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
def instructions(
    self,
    func: SystemPromptFunc[AgentDepsT],
    /,
) -> SystemPromptFunc[AgentDepsT]:
    """Decorator to register an instructions function for this toolset.

    The decorated function may be sync or async, and may optionally take a
    [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

    Example:
    ```python
    from pydantic_ai import FunctionToolset, RunContext

    toolset = FunctionToolset[int]()

    @toolset.instructions
    def my_instructions(ctx: RunContext[int]) -> str:
        return 'Always use the search tool when looking for information.'

    @toolset.tool
    def search(ctx: RunContext[int], query: str) -> str:
        return f'Results for: {query}'
    ```

    Args:
        func: The instructions function to register.

    Returns:
        The function unchanged, so it can still be called directly.
    """
    # Wrap the callable in a SystemPromptRunner before storing it.
    runner = SystemPromptRunner(func)
    self._instructions.append(runner)
    return func

add_function

add_function(
    func: ToolFuncEither[AgentDepsT, ToolParams],
    takes_ctx: bool | None = None,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: (
        ArgsValidatorFunc[AgentDepsT, ToolParams] | None
    ) = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: (
        type[GenerateJsonSchema] | None
    ) = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    defer_loading: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
) -> Tool[AgentDepsT]

Add a function as a tool to the toolset.

Can take a sync or async function.

The docstring is inspected to extract both the tool description and description of each parameter, learn more.

Parameters:

Name Type Description Default
func ToolFuncEither[AgentDepsT, ToolParams]

The tool function to register.

required
takes_ctx bool | None

Whether the function takes a RunContext as its first argument. If None, this is inferred from the function signature.

None
name str | None

The name of the tool, defaults to the function name.

None
description str | None

The description of the tool, defaults to the function docstring.

None
retries int | None

The number of retries to allow for this tool, defaults to the agent's default retries, which defaults to 1.

None
prepare ToolPrepareFunc[AgentDepsT] | None

custom method to prepare the tool definition for each step, return None to omit this tool from a given step. This is useful if you want to customise a tool at call time, or omit it completely from a step. See ToolPrepareFunc.

None
args_validator ArgsValidatorFunc[AgentDepsT, ToolParams] | None

custom method to validate tool arguments after schema validation has passed, before execution. The validator receives the already-validated and type-converted parameters, with RunContext as the first argument. Should raise ModelRetry on validation failure, return None on success. See ArgsValidatorFunc.

None
docstring_format DocstringFormat | None

The format of the docstring, see DocstringFormat. If None, the default value is determined by the toolset.

None
require_parameter_descriptions bool | None

If True, raise an error if a parameter description is missing. If None, the default value is determined by the toolset.

None
schema_generator type[GenerateJsonSchema] | None

The JSON schema generator class to use for this tool. If None, the default value is determined by the toolset.

None
strict bool | None

Whether to enforce JSON schema compliance (only affects OpenAI). See ToolDefinition for more info. If None, the default value is determined by the toolset.

None
sequential bool | None

Whether the function requires a sequential/serial execution environment. Defaults to False. If None, the default value is determined by the toolset.

None
requires_approval bool | None

Whether this tool requires human-in-the-loop approval. Defaults to False. See the tools documentation for more info. If None, the default value is determined by the toolset.

None
defer_loading bool | None

Whether to hide this tool until it's discovered via tool search. See Tool Search for more info. If None, the default value is determined by the toolset.

None
metadata dict[str, Any] | None

Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. If None, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.

None
timeout float | None

Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. Defaults to None (no timeout).

None
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
def add_function(
    self,
    func: ToolFuncEither[AgentDepsT, ToolParams],
    takes_ctx: bool | None = None,
    name: str | None = None,
    description: str | None = None,
    retries: int | None = None,
    prepare: ToolPrepareFunc[AgentDepsT] | None = None,
    args_validator: ArgsValidatorFunc[AgentDepsT, ToolParams] | None = None,
    docstring_format: DocstringFormat | None = None,
    require_parameter_descriptions: bool | None = None,
    schema_generator: type[GenerateJsonSchema] | None = None,
    strict: bool | None = None,
    sequential: bool | None = None,
    requires_approval: bool | None = None,
    defer_loading: bool | None = None,
    metadata: dict[str, Any] | None = None,
    timeout: float | None = None,
) -> Tool[AgentDepsT]:
    """Add a function as a tool to the toolset.

    Can take a sync or async function. The docstring is inspected to extract both the
    tool description and the description of each parameter,
    [learn more](../tools.md#function-tools-and-schema).

    Args:
        func: The tool function to register.
        takes_ctx: Whether the function takes a [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.
            If `None`, this is inferred from the function signature.
        name: The name of the tool, defaults to the function name.
        description: The description of the tool, defaults to the function docstring.
        retries: The number of retries to allow for this tool, defaults to the agent's default retries,
            which defaults to 1.
        prepare: Custom method to prepare the tool definition for each step; return `None` to omit the
            tool from a given step. Useful to customise a tool at call time or omit it completely from a step.
            See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
        args_validator: Custom method to validate tool arguments after schema validation has passed,
            before execution. Receives the already-validated, type-converted parameters with `RunContext`
            as the first argument; should raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] on
            validation failure and return `None` on success.
            See [`ArgsValidatorFunc`][pydantic_ai.tools.ArgsValidatorFunc].
        docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
            If `None`, the toolset's default is used.
        require_parameter_descriptions: If `True`, raise an error if a parameter description is missing.
            If `None`, the toolset's default is used.
        schema_generator: The JSON schema generator class to use for this tool.
            If `None`, the toolset's default is used.
        strict: Whether to enforce JSON schema compliance (only affects OpenAI).
            See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
            If `None`, the toolset's default is used.
        sequential: Whether the function requires a sequential/serial execution environment.
            If `None`, the toolset's default is used.
        requires_approval: Whether this tool requires human-in-the-loop approval.
            See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
            If `None`, the toolset's default is used.
        defer_loading: Whether to hide this tool until it's discovered via tool search.
            See [Tool Search](../tools-advanced.md#tool-search) for more info.
            If `None`, the toolset's default is used.
        metadata: Optional metadata for the tool. Not sent to the model, but usable for filtering and
            tool behavior customization. If `None`, the toolset's default is used; if provided, it is
            merged with the toolset's metadata.
        timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is
            returned to the model. Defaults to `None` (no timeout).

    Returns:
        The registered [`Tool`][pydantic_ai.tools.Tool] instance.
    """
    # Any per-tool option left as `None` inherits the toolset-level default.
    docstring_format = self.docstring_format if docstring_format is None else docstring_format
    require_parameter_descriptions = (
        self.require_parameter_descriptions
        if require_parameter_descriptions is None
        else require_parameter_descriptions
    )
    schema_generator = self.schema_generator if schema_generator is None else schema_generator
    strict = self.strict if strict is None else strict
    sequential = self.sequential if sequential is None else sequential
    requires_approval = self.requires_approval if requires_approval is None else requires_approval
    defer_loading = self._defer_loading if defer_loading is None else defer_loading

    tool = Tool[AgentDepsT](
        func,
        takes_ctx=takes_ctx,
        name=name,
        description=description,
        max_retries=retries,
        prepare=prepare,
        args_validator=args_validator,
        docstring_format=docstring_format,
        require_parameter_descriptions=require_parameter_descriptions,
        schema_generator=schema_generator,
        strict=strict,
        sequential=sequential,
        requires_approval=requires_approval,
        metadata=metadata,
        timeout=timeout,
        defer_loading=defer_loading,
    )
    # Registration (name-conflict check, retry/metadata inheritance) is centralized in add_tool.
    self.add_tool(tool)
    return tool

add_tool

add_tool(tool: Tool[AgentDepsT]) -> None

Add a tool to the toolset.

Parameters:

Name Type Description Default
tool Tool[AgentDepsT]

The tool to add.

required
Source code in pydantic_ai_slim/pydantic_ai/toolsets/function.py
562
563
564
565
566
567
568
569
570
571
572
573
574
def add_tool(self, tool: Tool[AgentDepsT]) -> None:
    """Add a tool to the toolset.

    Args:
        tool: The tool to add.

    Raises:
        UserError: If a tool with the same name is already registered.
    """
    if tool.name in self.tools:
        raise UserError(f'Tool name conflicts with existing tool: {tool.name!r}')
    # A tool without its own retry count inherits the toolset-level default.
    if tool.max_retries is None:
        tool.max_retries = self.max_retries
    # Merge toolset metadata underneath the tool's own, so per-tool entries win on key conflicts.
    if self.metadata is not None:
        tool.metadata = {**self.metadata, **(tool.metadata or {})}
    self.tools[tool.name] = tool

DeferredLoadingToolset dataclass

Bases: PreparedToolset[AgentDepsT]

A toolset that marks tools for deferred loading, hiding them from the model until discovered via tool search.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/deferred_loading.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
@dataclass(init=False)
class DeferredLoadingToolset(PreparedToolset[AgentDepsT]):
    """A toolset that marks tools for deferred loading, hiding them from the model until discovered via tool search.

    See [toolset docs](../toolsets.md#deferred-loading) for more information.
    """

    prepare_func: ToolsPrepareFunc[AgentDepsT] = field(init=False, repr=False)
    tool_names: frozenset[str] | None = None
    """Optional set of tool names to mark for deferred loading. If `None`, all tools are marked for deferred loading."""

    def __init__(
        self,
        wrapped: AbstractToolset[AgentDepsT],
        *,
        tool_names: frozenset[str] | None = None,
    ):
        self.tool_names = tool_names
        self.wrapped = wrapped

        async def _mark_deferred(_ctx: RunContext[AgentDepsT], tool_defs: list[ToolDefinition]) -> list[ToolDefinition]:
            # The closure captures the `tool_names` parameter directly, not `self.tool_names`.
            marked: list[ToolDefinition] = []
            for td in tool_defs:
                if tool_names is None or td.name in tool_names:
                    marked.append(replace(td, defer_loading=True))
                else:
                    marked.append(td)
            return marked

        self.prepare_func = _mark_deferred

tool_names class-attribute instance-attribute

tool_names: frozenset[str] | None = tool_names

Optional set of tool names to mark for deferred loading. If None, all tools are marked for deferred loading.

PrefixedToolset dataclass

Bases: WrapperToolset[AgentDepsT]

A toolset that prefixes the names of the tools it contains.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/prefixed.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
@dataclass
class PrefixedToolset(WrapperToolset[AgentDepsT]):
    """A toolset that prefixes the names of the tools it contains.

    See [toolset docs](../toolsets.md#prefixing-tool-names) for more information.
    """

    prefix: str

    @property
    def tool_name_conflict_hint(self) -> str:
        return 'Change the `prefix` attribute to avoid name conflicts.'

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        # Expose every wrapped tool under '<prefix>_<original name>'.
        prefixed_tools: dict[str, ToolsetTool[AgentDepsT]] = {}
        for original_name, tool in (await super().get_tools(ctx)).items():
            prefixed_name = f'{self.prefix}_{original_name}'
            prefixed_tools[prefixed_name] = replace(
                tool,
                toolset=self,
                tool_def=replace(tool.tool_def, name=prefixed_name),
            )
        return prefixed_tools

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        # Strip the prefix so the wrapped toolset sees the name it originally registered.
        original_name = name.removeprefix(self.prefix + '_')
        return await super().call_tool(
            original_name,
            tool_args,
            replace(ctx, tool_name=original_name),
            replace(tool, tool_def=replace(tool.tool_def, name=original_name)),
        )

RenamedToolset dataclass

Bases: WrapperToolset[AgentDepsT]

A toolset that renames the tools it contains using a dictionary mapping new names to original names.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/renamed.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
@dataclass
class RenamedToolset(WrapperToolset[AgentDepsT]):
    """A toolset that renames the tools it contains using a dictionary mapping new names to original names.

    See [toolset docs](../toolsets.md#renaming-tools) for more information.
    """

    name_map: dict[str, str]

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        # `name_map` maps new -> original; invert it so tools can be looked up by original name.
        new_names_by_original = {original: new for new, original in self.name_map.items()}
        renamed: dict[str, ToolsetTool[AgentDepsT]] = {}
        for original_name, tool in (await super().get_tools(ctx)).items():
            new_name = new_names_by_original.get(original_name)
            if not new_name:
                # Tools not covered by the map keep their original name unchanged.
                renamed[original_name] = tool
            else:
                renamed[new_name] = replace(
                    tool,
                    toolset=self,
                    tool_def=replace(tool.tool_def, name=new_name),
                )
        return renamed

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        # Translate the (possibly renamed) tool name back before delegating.
        original_name = self.name_map.get(name, name)
        return await super().call_tool(
            original_name,
            tool_args,
            replace(ctx, tool_name=original_name),
            replace(tool, tool_def=replace(tool.tool_def, name=original_name)),
        )

PreparedToolset dataclass

Bases: WrapperToolset[AgentDepsT]

A toolset that prepares the tools it contains using a prepare function that takes the agent context and the original tool definitions.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/prepared.py
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
@dataclass
class PreparedToolset(WrapperToolset[AgentDepsT]):
    """A toolset that prepares the tools it contains using a prepare function that takes the agent context and the original tool definitions.

    See [toolset docs](../toolsets.md#preparing-tool-definitions) for more information.
    """

    prepare_func: ToolsPrepareFunc[AgentDepsT]

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        original_tools = await super().get_tools(ctx)
        original_defs = [tool.tool_def for tool in original_tools.values()]

        # The prepare function may be sync or async; only await when necessary.
        prepared = self.prepare_func(ctx, original_defs)
        if inspect.isawaitable(prepared):
            prepared = await prepared
        prepared_defs_by_name = {tool_def.name: tool_def for tool_def in (prepared or [])}

        # Preparation may filter or modify tool definitions, but never introduce new names.
        unknown_names = prepared_defs_by_name.keys() - original_tools.keys()
        if unknown_names:
            raise UserError(
                'Prepare function cannot add or rename tools. Use `FunctionToolset.add_function()` or `RenamedToolset` instead.'
            )

        return {
            name: replace(original_tools[name], tool_def=tool_def)
            for name, tool_def in prepared_defs_by_name.items()
        }

WrapperToolset dataclass

Bases: AbstractToolset[AgentDepsT]

A toolset that wraps another toolset and delegates to it.

See toolset docs for more information.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
@dataclass
class WrapperToolset(AbstractToolset[AgentDepsT]):
    """A toolset that wraps another toolset and delegates to it.

    See [toolset docs](../toolsets.md#wrapping-a-toolset) for more information.
    """

    wrapped: AbstractToolset[AgentDepsT]

    @property
    def id(self) -> str | None:
        return None  # pragma: no cover

    @property
    def label(self) -> str:
        return f'{self.__class__.__name__}({self.wrapped.label})'

    async def for_run(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        # Return self unchanged when the wrapped toolset is unchanged, avoiding an unnecessary copy.
        new_wrapped = await self.wrapped.for_run(ctx)
        return self if new_wrapped is self.wrapped else replace(self, wrapped=new_wrapped)

    async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> AbstractToolset[AgentDepsT]:
        new_wrapped = await self.wrapped.for_run_step(ctx)
        return self if new_wrapped is self.wrapped else replace(self, wrapped=new_wrapped)

    async def __aenter__(self) -> Self:
        await self.wrapped.__aenter__()
        return self

    async def __aexit__(self, *args: Any) -> bool | None:
        return await self.wrapped.__aexit__(*args)

    async def get_instructions(
        self, ctx: RunContext[AgentDepsT]
    ) -> str | InstructionPart | Sequence[str | InstructionPart] | None:
        """Delegate instructions to the wrapped toolset.

        This explicit delegation ensures type safety and proper propagation of the
        instructions from wrapped toolsets to the agent's system prompt.
        """
        return await self.wrapped.get_instructions(ctx)

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        return await self.wrapped.get_tools(ctx)

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        return await self.wrapped.call_tool(name, tool_args, ctx, tool)

    def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None:
        self.wrapped.apply(visitor)

    def visit_and_replace(
        self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]]
    ) -> AbstractToolset[AgentDepsT]:
        return replace(self, wrapped=self.wrapped.visit_and_replace(visitor))

get_instructions async

get_instructions(
    ctx: RunContext[AgentDepsT],
) -> (
    str
    | InstructionPart
    | Sequence[str | InstructionPart]
    | None
)

Delegate instructions to the wrapped toolset.

This explicit delegation ensures type safety and proper propagation of the instructions from wrapped toolsets to the agent's system prompt.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py
50
51
52
53
54
55
56
57
58
async def get_instructions(
    self, ctx: RunContext[AgentDepsT]
) -> str | InstructionPart | Sequence[str | InstructionPart] | None:
    """Delegate instructions to the wrapped toolset.

    This explicit delegation ensures type safety and proper propagation of the
    instructions from wrapped toolsets to the agent's system prompt.

    Args:
        ctx: The run context for this agent run.

    Returns:
        Whatever the wrapped toolset's `get_instructions` returns, unchanged.
    """
    return await self.wrapped.get_instructions(ctx)

ToolsetFunc module-attribute

A sync/async function which takes a run context and returns a toolset.

FastMCPToolset dataclass

Bases: AbstractToolset[AgentDepsT]

A FastMCP Toolset that uses the FastMCP Client to call tools from a local or remote MCP Server.

The Toolset can accept a FastMCP Client, a FastMCP Transport, or any other object which a FastMCP Transport can be created from.

See https://gofastmcp.com/clients/transports for a full list of transports available.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/fastmcp.py
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
@dataclass(init=False)
class FastMCPToolset(AbstractToolset[AgentDepsT]):
    """A FastMCP Toolset that uses the FastMCP Client to call tools from a local or remote MCP Server.

    The Toolset can accept a FastMCP Client, a FastMCP Transport, or any other object which a FastMCP Transport can be created from.

    See https://gofastmcp.com/clients/transports for a full list of transports available.
    """

    client: Client[Any]
    """The FastMCP client to use."""

    _: KW_ONLY

    tool_error_behavior: Literal['model_retry', 'error']
    """The behavior to take when a tool error occurs."""

    max_retries: int
    """The maximum number of retries to attempt if a tool call fails."""

    include_instructions: bool
    """Whether to include the server's instructions in the agent's instructions.

    Defaults to `False` for backward compatibility.
    """

    # Optional user-supplied toolset ID, exposed read-only via the `id` property.
    _id: str | None

    # Instructions received from the server during initialization. Unset before the
    # first __aenter__ (the `instructions` property raises AttributeError then) and
    # reset to None when the last __aexit__ closes the client.
    _instructions: str | None

    def __init__(
        self,
        client: Client[Any]
        | ClientTransport
        | FastMCP
        | FastMCP1Server
        | AnyUrl
        | Path
        | MCPConfig
        | dict[str, Any]
        | str,
        *,
        max_retries: int = 1,
        tool_error_behavior: Literal['model_retry', 'error'] = 'model_retry',
        include_instructions: bool = False,
        id: str | None = None,
    ) -> None:
        # Accept either a ready-made Client or anything a Client transport can be built from.
        if isinstance(client, Client):
            self.client = client
        else:
            self.client = Client[Any](transport=client)

        self._id = id
        self.max_retries = max_retries
        self.tool_error_behavior = tool_error_behavior
        self.include_instructions = include_instructions

        # Reference-counting state for re-entrant use as an async context manager:
        # the client is only opened on the first enter and closed on the last exit.
        self._enter_lock: Lock = Lock()
        self._running_count: int = 0
        self._exit_stack: AsyncExitStack | None = None

    @property
    def id(self) -> str | None:
        return self._id

    @property
    def instructions(self) -> str | None:
        """Access the instructions sent by the FastMCP server during initialization."""
        # `_instructions` is only assigned inside __aenter__, so before then the
        # attribute doesn't exist at all; surface that as an explicit error.
        if not hasattr(self, '_instructions'):
            raise AttributeError(
                f'The `{self.__class__.__name__}.instructions` is only available after initialization.'
            )
        return self._instructions

    async def __aenter__(self) -> Self:
        async with self._enter_lock:
            # Only the first concurrent entrant actually connects the client.
            if self._running_count == 0:
                self._exit_stack = AsyncExitStack()
                await self._exit_stack.enter_async_context(self.client)
                init_result = self.client.initialize_result
                assert init_result is not None, 'FastMCP Client initialization failed: initialize_result is None'
                self._instructions = init_result.instructions

            self._running_count += 1

        return self

    async def __aexit__(self, *args: Any) -> bool | None:
        async with self._enter_lock:
            # Only the last exit tears down the connection and clears cached state.
            self._running_count -= 1
            if self._running_count == 0 and self._exit_stack:
                await self._exit_stack.aclose()
                self._exit_stack = None
                self._instructions = None

        return None

    async def get_instructions(self, ctx: RunContext[AgentDepsT]) -> messages.InstructionPart | None:
        """Return the FastMCP server's instructions for how to use its tools.

        If [`include_instructions`][pydantic_ai.toolsets.fastmcp.FastMCPToolset.include_instructions] is `True`, returns
        the [`instructions`][pydantic_ai.toolsets.fastmcp.FastMCPToolset.instructions] sent by the FastMCP server during
        initialization. Otherwise, returns `None`.

        Instructions from external servers are marked as dynamic since they may change between connections.

        Args:
            ctx: The run context for this agent run.

        Returns:
            An `InstructionPart` with the server's instructions if `include_instructions` is enabled, otherwise `None`.
        """
        if not self.include_instructions:
            return None
        try:
            instructions = self.instructions
        except AttributeError:
            # Server not yet initialized — return None rather than propagating.
            return None
        if instructions is None:
            return None
        return messages.InstructionPart(content=instructions, dynamic=True)

    async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
        # `async with self` ensures the client is connected for the duration of the listing.
        async with self:
            return {
                mcp_tool.name: self.tool_for_tool_def(
                    ToolDefinition(
                        name=mcp_tool.name,
                        description=mcp_tool.description,
                        parameters_json_schema=mcp_tool.inputSchema,
                        # Server-provided extras are preserved in tool metadata for downstream use.
                        metadata={
                            'meta': mcp_tool.meta,
                            'annotations': mcp_tool.annotations.model_dump() if mcp_tool.annotations else None,
                            'output_schema': mcp_tool.outputSchema or None,
                        },
                    )
                )
                for mcp_tool in await self.client.list_tools()
            }

    async def call_tool(
        self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
    ) -> Any:
        async with self:
            try:
                call_tool_result: CallToolResult = await self.client.call_tool(name=name, arguments=tool_args)
            except ToolError as e:
                # Per `tool_error_behavior`: either let the model retry with the error
                # message, or propagate the exception to the caller.
                if self.tool_error_behavior == 'model_retry':
                    raise ModelRetry(message=str(e)) from e
                else:
                    raise e

        # If we have structured content, return that
        if call_tool_result.structured_content:
            return call_tool_result.structured_content

        # Otherwise, return the content
        return _map_fastmcp_tool_results(parts=call_tool_result.content)

    def tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool[AgentDepsT]:
        # Wrap an MCP-derived ToolDefinition into a ToolsetTool bound to this toolset.
        return ToolsetTool[AgentDepsT](
            tool_def=tool_def,
            toolset=self,
            max_retries=self.max_retries,
            args_validator=TOOL_SCHEMA_VALIDATOR,
        )

client instance-attribute

client: Client[Any]

The FastMCP client to use.

max_retries instance-attribute

max_retries: int = max_retries

The maximum number of retries to attempt if a tool call fails.

tool_error_behavior instance-attribute

tool_error_behavior: Literal["model_retry", "error"] = (
    tool_error_behavior
)

The behavior to take when a tool error occurs.

include_instructions instance-attribute

include_instructions: bool = include_instructions

Whether to include the server's instructions in the agent's instructions.

Defaults to False for backward compatibility.

instructions property

instructions: str | None

Access the instructions sent by the FastMCP server during initialization.

get_instructions async

get_instructions(
    ctx: RunContext[AgentDepsT],
) -> InstructionPart | None

Return the FastMCP server's instructions for how to use its tools.

If include_instructions is True, returns the instructions sent by the FastMCP server during initialization. Otherwise, returns None.

Instructions from external servers are marked as dynamic since they may change between connections.

Parameters:

Name Type Description Default
ctx RunContext[AgentDepsT]

The run context for this agent run.

required

Returns:

Type Description
InstructionPart | None

An InstructionPart with the server's instructions if include_instructions is enabled, otherwise None.

Source code in pydantic_ai_slim/pydantic_ai/toolsets/fastmcp.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
async def get_instructions(self, ctx: RunContext[AgentDepsT]) -> messages.InstructionPart | None:
    """Return the FastMCP server's instructions for how to use its tools.

    If [`include_instructions`][pydantic_ai.toolsets.fastmcp.FastMCPToolset.include_instructions] is `True`, returns
    the [`instructions`][pydantic_ai.toolsets.fastmcp.FastMCPToolset.instructions] sent by the FastMCP server during
    initialization. Otherwise, returns `None`.

    Instructions from external servers are marked as dynamic since they may change between connections.

    Args:
        ctx: The run context for this agent run.

    Returns:
        An `InstructionPart` with the server's instructions if `include_instructions` is enabled, otherwise `None`.
    """
    if not self.include_instructions:
        return None
    try:
        instructions = self.instructions
    except AttributeError:
        # Server not yet initialized — return None rather than propagating.
        return None
    if instructions is None:
        return None
    # dynamic=True: server instructions may differ between connections.
    return messages.InstructionPart(content=instructions, dynamic=True)