Skip to content

dottxt

Integration with Dottxt's API.

AsyncDottxt

Bases: AsyncModel

Async thin wrapper around the dottxt.client.AsyncDotTxt client.

This wrapper is used to convert the input and output types specified by the users at a higher level to arguments to the dottxt.client.AsyncDotTxt client.

Source code in outlines/models/dottxt.py
class AsyncDottxt(AsyncModel):
    """Async thin wrapper around the `dottxt.client.AsyncDotTxt` client.

    This wrapper is used to convert the input and output types specified by the
    users at a higher level to arguments to the `dottxt.client.AsyncDotTxt`
    client.

    """

    def __init__(
        self,
        client: "AsyncDottxtClient",
        model: Optional[str] = None,
    ):
        """
        Parameters
        ----------
        client
            A `dottxt.AsyncDotTxt` client.
        model
            The model identifier to use (e.g. ``"dottxt/dottxt-v1-alpha"``).

        """
        self.client = client
        self.model = model
        self.type_adapter = DottxtTypeAdapter()

    async def generate(
        self,
        model_input: str,
        output_type: Optional[Any] = None,
        **inference_kwargs: Any,
    ) -> str:
        """Generate text using Dottxt asynchronously.

        Parameters
        ----------
        model_input
            The prompt based on which the model will generate a response.
        output_type
            The desired format of the response generated by the model. The
            output type must be of a type that can be converted to a JSON
            schema.
        **inference_kwargs
            Additional keyword arguments to pass to the client.

        Returns
        -------
        str
            The text generated by the model as a JSON string.

        Raises
        ------
        ValueError
            If no model identifier was given at construction time or as a
            keyword argument.

        """
        prompt = self.type_adapter.format_input(model_input)
        json_schema = self.type_adapter.format_output_type(output_type)

        # Fall back to the model set at construction time unless the caller
        # overrides it per call.
        if "model" not in inference_kwargs and self.model is not None:
            inference_kwargs["model"] = self.model

        if "model" not in inference_kwargs:
            # NOTE: the factory for both sync and async clients is
            # `from_dottxt()`; there is no `from_dottxt_async()`, so the
            # previous message pointed users at a nonexistent function.
            raise ValueError(
                "A model identifier is required. Pass it to `from_dottxt()` "
                "or as a `model=` keyword argument at generation time."
            )

        result = await self.client.generate(
            input=prompt,
            response_format=json_schema,
            **inference_kwargs,
        )
        return json.dumps(result)

    async def generate_batch(
        self,
        model_input,
        output_type=None,
        **inference_kwargs,
    ):
        """Not supported by Dottxt."""
        raise NotImplementedError(
            "Dottxt does not support batch generation."
        )

    async def generate_stream(  # type: ignore[override]
        self,
        model_input,
        output_type=None,
        **inference_kwargs,
    ):
        """Not supported by Dottxt."""
        raise NotImplementedError(
            "Dottxt does not support streaming. Call the model/generator for "
            + "regular generation instead."
        )
        yield  # makes this an async generator so `async for` can consume it

__init__(client, model=None)

Parameters:

Name Type Description Default
client AsyncDotTxt

A dottxt.AsyncDotTxt client.

required
model Optional[str]

The model identifier to use (e.g. "dottxt/dottxt-v1-alpha").

None
Source code in outlines/models/dottxt.py
def __init__(
    self,
    client: "AsyncDottxtClient",
    model: Optional[str] = None,
):
    """Store the client and the default model identifier.

    Parameters
    ----------
    client
        A `dottxt.AsyncDotTxt` client instance.
    model
        Default model identifier (e.g. ``"dottxt/dottxt-v1-alpha"``); may be
        overridden per generation call.

    """
    # Adapter that converts user-facing input/output types into arguments
    # understood by the Dottxt client.
    self.type_adapter = DottxtTypeAdapter()
    self.client = client
    self.model = model

generate(model_input, output_type=None, **inference_kwargs) async

Generate text using Dottxt asynchronously.

Parameters:

Name Type Description Default
model_input str

The prompt based on which the model will generate a response.

required
output_type Optional[Any]

The desired format of the response generated by the model. The output type must be of a type that can be converted to a JSON schema.

None
**inference_kwargs Any

Additional keyword arguments to pass to the client.

{}

Returns:

Type Description
str

The text generated by the model as a JSON string.

Source code in outlines/models/dottxt.py
async def generate(
    self,
    model_input: str,
    output_type: Optional[Any] = None,
    **inference_kwargs: Any,
) -> str:
    """Generate text using Dottxt asynchronously.

    Parameters
    ----------
    model_input
        The prompt based on which the model will generate a response.
    output_type
        The desired format of the response generated by the model. The
        output type must be of a type that can be converted to a JSON
        schema.
    **inference_kwargs
        Additional keyword arguments to pass to the client.

    Returns
    -------
    str
        The text generated by the model as a JSON string.

    Raises
    ------
    ValueError
        If no model identifier was given at construction time or as a
        keyword argument.

    """
    prompt = self.type_adapter.format_input(model_input)
    json_schema = self.type_adapter.format_output_type(output_type)

    # Fall back to the model set at construction time unless the caller
    # overrides it per call.
    if "model" not in inference_kwargs and self.model is not None:
        inference_kwargs["model"] = self.model

    if "model" not in inference_kwargs:
        # NOTE: the factory for both sync and async clients is
        # `from_dottxt()`; there is no `from_dottxt_async()`, so the
        # previous message pointed users at a nonexistent function.
        raise ValueError(
            "A model identifier is required. Pass it to `from_dottxt()` "
            "or as a `model=` keyword argument at generation time."
        )

    result = await self.client.generate(
        input=prompt,
        response_format=json_schema,
        **inference_kwargs,
    )
    return json.dumps(result)

Dottxt

Bases: Model

Thin wrapper around the dottxt.client.DotTxt client.

This wrapper is used to convert the input and output types specified by the users at a higher level to arguments to the dottxt.client.DotTxt client.

Source code in outlines/models/dottxt.py
class Dottxt(Model):
    """Thin wrapper around the `dottxt.client.DotTxt` client.

    Converts the higher-level input and output types supplied by users into
    arguments for the `dottxt.client.DotTxt` client.

    """

    def __init__(
        self,
        client: "DottxtClient",
        model: Optional[str] = None,
    ):
        """Store the client and the default model identifier.

        Parameters
        ----------
        client
            A `dottxt.DotTxt` client instance.
        model
            Default model identifier (e.g. ``"dottxt/dottxt-v1-alpha"``); may
            be overridden per generation call.

        """
        # Adapter that converts user-facing input/output types into arguments
        # understood by the Dottxt client.
        self.type_adapter = DottxtTypeAdapter()
        self.client = client
        self.model = model

    def generate(
        self,
        model_input: str,
        output_type: Optional[Any] = None,
        **inference_kwargs: Any,
    ) -> str:
        """Generate text using Dottxt.

        Parameters
        ----------
        model_input
            The prompt the model responds to.
        output_type
            Desired response format; must be convertible to a JSON schema.
        **inference_kwargs
            Extra keyword arguments forwarded to the client.

        Returns
        -------
        str
            The text generated by the model as a JSON string.

        Raises
        ------
        ValueError
            If no model identifier is available from the constructor or the
            keyword arguments.

        """
        formatted_prompt = self.type_adapter.format_input(model_input)
        schema = self.type_adapter.format_output_type(output_type)

        # The constructor-time model acts as a default that explicit keyword
        # arguments take precedence over.
        if self.model is not None:
            inference_kwargs.setdefault("model", self.model)

        if "model" not in inference_kwargs:
            raise ValueError(
                "A model identifier is required. Pass it to `from_dottxt()` "
                "or as a `model=` keyword argument at generation time."
            )

        completion = self.client.generate(
            input=formatted_prompt,
            response_format=schema,
            **inference_kwargs,
        )
        return json.dumps(completion)

    def generate_batch(
        self,
        model_input,
        output_type=None,
        **inference_kwargs,
    ):
        """Not supported by Dottxt."""
        raise NotImplementedError(
            "Dottxt does not support batch generation."
        )

    def generate_stream(
        self,
        model_input,
        output_type=None,
        **inference_kwargs,
    ):
        """Not supported by Dottxt."""
        raise NotImplementedError(
            "Dottxt does not support streaming. Call the model/generator for "
            "regular generation instead."
        )
__init__(client, model=None)

Parameters:

Name Type Description Default
client DotTxt

A dottxt.DotTxt client.

required
model Optional[str]

The model identifier to use (e.g. "dottxt/dottxt-v1-alpha").

None
Source code in outlines/models/dottxt.py
def __init__(
    self,
    client: "DottxtClient",
    model: Optional[str] = None,
):
    """Store the client and the default model identifier.

    Parameters
    ----------
    client
        A `dottxt.DotTxt` client instance.
    model
        Default model identifier (e.g. ``"dottxt/dottxt-v1-alpha"``); may be
        overridden per generation call.

    """
    # Adapter that converts user-facing input/output types into arguments
    # understood by the Dottxt client.
    self.type_adapter = DottxtTypeAdapter()
    self.client = client
    self.model = model

generate(model_input, output_type=None, **inference_kwargs)

Generate text using Dottxt.

Parameters:

Name Type Description Default
model_input str

The prompt based on which the model will generate a response.

required
output_type Optional[Any]

The desired format of the response generated by the model. The output type must be of a type that can be converted to a JSON schema.

None
**inference_kwargs Any

Additional keyword arguments to pass to the client.

{}

Returns:

Type Description
str

The text generated by the model as a JSON string.

Source code in outlines/models/dottxt.py
def generate(
    self,
    model_input: str,
    output_type: Optional[Any] = None,
    **inference_kwargs: Any,
) -> str:
    """Generate text using Dottxt.

    Parameters
    ----------
    model_input
        The prompt the model responds to.
    output_type
        Desired response format; must be convertible to a JSON schema.
    **inference_kwargs
        Extra keyword arguments forwarded to the client.

    Returns
    -------
    str
        The text generated by the model as a JSON string.

    Raises
    ------
    ValueError
        If no model identifier is available from the constructor or the
        keyword arguments.

    """
    formatted_prompt = self.type_adapter.format_input(model_input)
    schema = self.type_adapter.format_output_type(output_type)

    # The constructor-time model acts as a default that explicit keyword
    # arguments take precedence over.
    if self.model is not None:
        inference_kwargs.setdefault("model", self.model)

    if "model" not in inference_kwargs:
        raise ValueError(
            "A model identifier is required. Pass it to `from_dottxt()` "
            "or as a `model=` keyword argument at generation time."
        )

    completion = self.client.generate(
        input=formatted_prompt,
        response_format=schema,
        **inference_kwargs,
    )
    return json.dumps(completion)

DottxtTypeAdapter

Bases: ModelTypeAdapter

Type adapter for the Dottxt model.

Source code in outlines/models/dottxt.py
class DottxtTypeAdapter(ModelTypeAdapter):
    """Type adapter for the `Dottxt` model."""

    def format_input(self, model_input: str) -> str:
        """Format the prompt to pass to the client.

        Parameters
        ----------
        model_input
            The input provided by the user.

        Returns
        -------
        str
            The input to pass to the client.

        Raises
        ------
        TypeError
            If the input is not a string.

        """
        if isinstance(model_input, str):
            return model_input
        raise TypeError(
            f"The input type {model_input} is not available with Dottxt. "
            "The only available type is `str`."
        )

    def format_output_type(self, output_type: Optional[Any] = None) -> str:
        """Format the output type to pass to the client.

        Parameters
        ----------
        output_type
            The output type provided by the user.

        Returns
        -------
        str
            The output type to pass to the client as a JSON schema string.

        Raises
        ------
        TypeError
            If the output type is missing or unsupported.

        """
        if output_type is None:
            raise TypeError(
                "You must provide an output type. Dottxt only supports "
                "constrained generation."
            )
        elif isinstance(output_type, Regex):
            raise TypeError(
                "Regex-based structured outputs will soon be available with "
                "Dottxt. Use an open source model in the meantime."
            )
        elif isinstance(output_type, CFG):
            raise TypeError(
                "CFG-based structured outputs will soon be available with "
                "Dottxt. Use an open source model in the meantime."
            )
        elif JsonSchema.is_json_schema(output_type):
            return cast(str, JsonSchema.convert_to(output_type, ["str"]))
        else:
            type_name = getattr(output_type, "__name__", output_type)
            # Fixed typo: "local mode" -> "local model", matching the other
            # error messages that suggest using an open source model.
            raise TypeError(
                f"The type `{type_name}` is not supported by Dottxt. "
                "Consider using a local model instead."
            )

format_input(model_input)

Format the prompt to pass to the client.

Parameters:

Name Type Description Default
model_input str

The input provided by the user.

required

Returns:

Type Description
str

The input to pass to the client.

Source code in outlines/models/dottxt.py
def format_input(self, model_input: str) -> str:
    """Validate and return the prompt to send to the client.

    Parameters
    ----------
    model_input
        The input provided by the user.

    Returns
    -------
    str
        The input to pass to the client.

    Raises
    ------
    TypeError
        If the input is not a string.

    """
    # Dottxt accepts plain-string prompts only; reject anything else early.
    if not isinstance(model_input, str):
        raise TypeError(
            f"The input type {model_input} is not available with Dottxt. "
            "The only available type is `str`."
        )
    return model_input

format_output_type(output_type=None)

Format the output type to pass to the client.

Parameters:

Name Type Description Default
output_type Optional[Any]

The output type provided by the user.

None

Returns:

Type Description
str

The output type to pass to the client as a JSON schema string.

Source code in outlines/models/dottxt.py
def format_output_type(self, output_type: Optional[Any] = None) -> str:
    """Format the output type to pass to the client.

    Parameters
    ----------
    output_type
        The output type provided by the user.

    Returns
    -------
    str
        The output type to pass to the client as a JSON schema string.

    Raises
    ------
    TypeError
        If the output type is missing or unsupported.

    """
    if output_type is None:
        raise TypeError(
            "You must provide an output type. Dottxt only supports "
            "constrained generation."
        )
    elif isinstance(output_type, Regex):
        raise TypeError(
            "Regex-based structured outputs will soon be available with "
            "Dottxt. Use an open source model in the meantime."
        )
    elif isinstance(output_type, CFG):
        raise TypeError(
            "CFG-based structured outputs will soon be available with "
            "Dottxt. Use an open source model in the meantime."
        )
    elif JsonSchema.is_json_schema(output_type):
        return cast(str, JsonSchema.convert_to(output_type, ["str"]))
    else:
        type_name = getattr(output_type, "__name__", output_type)
        # Fixed typo: "local mode" -> "local model", matching the other
        # error messages that suggest using an open source model.
        raise TypeError(
            f"The type `{type_name}` is not supported by Dottxt. "
            "Consider using a local model instead."
        )

from_dottxt(client, model=None)

Create an Outlines Dottxt or AsyncDottxt model instance from a dottxt.DotTxt or dottxt.AsyncDotTxt client instance.

Parameters:

Name Type Description Default
client Union[DotTxt, AsyncDotTxt]

A dottxt.DotTxt or dottxt.AsyncDotTxt client instance.

required
model Optional[str]

The model identifier to use (e.g. "dottxt/dottxt-v1-alpha").

None

Returns:

Type Description
Union[Dottxt, AsyncDottxt]

An Outlines Dottxt or AsyncDottxt model instance.

Source code in outlines/models/dottxt.py
def from_dottxt(
    client: "Union[DottxtClient, AsyncDottxtClient]",
    model: Optional[str] = None,
) -> Union[Dottxt, AsyncDottxt]:
    """Build the Outlines wrapper matching the given Dottxt client.

    Parameters
    ----------
    client
        A `dottxt.DotTxt` or `dottxt.AsyncDotTxt` client instance.
    model
        The model identifier to use (e.g. ``"dottxt/dottxt-v1-alpha"``).

    Returns
    -------
    Union[Dottxt, AsyncDottxt]
        An Outlines `Dottxt` or `AsyncDottxt` model instance.

    Raises
    ------
    ValueError
        If `client` is neither a sync nor an async Dottxt client.

    """
    # Imported lazily so the `dottxt` dependency is only required when used.
    from dottxt import AsyncDotTxt, DotTxt

    if isinstance(client, DotTxt):
        return Dottxt(client, model)
    if isinstance(client, AsyncDotTxt):
        return AsyncDottxt(client, model)
    raise ValueError(
        "Invalid client type. The client must be an instance of "
        "`dottxt.DotTxt` or `dottxt.AsyncDotTxt`."
    )