Skip to content

generator

Encapsulate a model and an output type into a reusable object.

AsyncBlackBoxGenerator

Asynchronous generator for which we don't control constrained generation.

The output type provided is not compiled into a logits processor, but is instead directly passed on to the model.

Source code in outlines/generator.py
class AsyncBlackBoxGenerator:
    """Asynchronous generator wrapping a model whose constrained generation
    is delegated to the model itself.

    No logits processor is compiled from the output type; the output type is
    handed directly to the underlying model at generation time.

    """
    output_type: Optional[Any]

    def __init__(self, model: AsyncBlackBoxModel, output_type: Optional[Any]):
        """
        Parameters
        ----------
        model
            An instance of an Outlines model.
        output_type
            The output type that will be used to constrain the generation.

        """
        self.model = model
        self.output_type = output_type

        # FSM constraints need logits-processor control, which API-based
        # (black-box) models do not expose — reject them up front.
        if isinstance(output_type, FSM):
            raise NotImplementedError(
                "FSM generation is not supported for API-based models"
            )

    async def __call__(self, prompt: Any, **inference_kwargs) -> Any:
        """Generate a single response from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        Any
            The response generated by the model.

        """
        coroutine = self.model.generate(
            prompt, self.output_type, **inference_kwargs
        )
        return await coroutine

    async def stream(
        self, prompt: Any, **inference_kwargs
    ) -> AsyncIterator[Any]:
        """Generate a stream of response chunks from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        AsyncIterator[Any]
            An async iterator over the chunks produced by the model.

        """
        chunk_source = self.model.generate_stream(  # pragma: no cover
            prompt, self.output_type, **inference_kwargs
        )
        async for chunk in chunk_source:
            yield chunk

__call__(prompt, **inference_kwargs) async

Generate a response from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
async def __call__(self, prompt: Any, **inference_kwargs) -> Any:
    """Generate a single response from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    Any
        The response generated by the model.

    """
    coroutine = self.model.generate(
        prompt, self.output_type, **inference_kwargs
    )
    return await coroutine

__init__(model, output_type)

Parameters:

Name Type Description Default
model AsyncBlackBoxModel

An instance of an Outlines model.

required
output_type Optional[Any]

The output type that will be used to constrain the generation.

required
Source code in outlines/generator.py
def __init__(self, model: AsyncBlackBoxModel, output_type: Optional[Any]):
    """
    Parameters
    ----------
    model
        An instance of an Outlines model.
    output_type
        The output type that will be used to constrain the generation.

    """
    self.model = model
    self.output_type = output_type

    # FSM constraints need logits-processor control, which API-based
    # models do not expose — reject them up front.
    if isinstance(output_type, FSM):
        raise NotImplementedError(
            "FSM generation is not supported for API-based models"
        )

stream(prompt, **inference_kwargs) async

Generate a stream of responses from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
async def stream(
    self, prompt: Any, **inference_kwargs
) -> AsyncIterator[Any]:
    """Generate a stream of response chunks from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    AsyncIterator[Any]
        An async iterator over the chunks produced by the model.

    """
    chunk_source = self.model.generate_stream(  # pragma: no cover
        prompt, self.output_type, **inference_kwargs
    )
    async for chunk in chunk_source:
        yield chunk

BlackBoxGenerator

Synchronous generator for which we don't control constrained generation.

The output type provided is not compiled into a logits processor, but is instead directly passed on to the model.

Source code in outlines/generator.py
class BlackBoxGenerator:
    """Synchronous generator wrapping a model whose constrained generation
    is delegated to the model itself.

    No logits processor is compiled from the output type; the output type is
    handed directly to the underlying model at generation time.

    """
    output_type: Optional[Any]

    def __init__(self, model: BlackBoxModel, output_type: Optional[Any]):
        """
        Parameters
        ----------
        model
            An instance of an Outlines model.
        output_type
            The output type that will be used to constrain the generation.

        """
        self.model = model
        self.output_type = output_type

        # FSM constraints need logits-processor control, which API-based
        # (black-box) models do not expose — reject them up front.
        if isinstance(output_type, FSM):
            raise NotImplementedError(
                "FSM generation is not supported for API-based models"
            )

    def __call__(self, prompt: Any, **inference_kwargs) -> Any:
        """Generate a single response from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        Any
            The response generated by the model.

        """
        return self.model.generate(prompt, self.output_type, **inference_kwargs)

    def stream(self, prompt: Any, **inference_kwargs) -> Iterator[Any]:
        """Generate a stream of response chunks from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        Iterator[Any]
            An iterator over the chunks produced by the model.

        """
        chunk_iterator = self.model.generate_stream(
            prompt, self.output_type, **inference_kwargs
        )
        return chunk_iterator

__call__(prompt, **inference_kwargs)

Generate a response from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
def __call__(self, prompt: Any, **inference_kwargs) -> Any:
    """Generate a single response from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    Any
        The response generated by the model.

    """
    response = self.model.generate(
        prompt, self.output_type, **inference_kwargs
    )
    return response

__init__(model, output_type)

Parameters:

Name Type Description Default
model BlackBoxModel

An instance of an Outlines model.

required
output_type Optional[Any]

The output type that will be used to constrain the generation.

required
Source code in outlines/generator.py
def __init__(self, model: BlackBoxModel, output_type: Optional[Any]):
    """
    Parameters
    ----------
    model
        An instance of an Outlines model.
    output_type
        The output type that will be used to constrain the generation.

    """
    self.model = model
    self.output_type = output_type

    # FSM constraints need logits-processor control, which API-based
    # models do not expose — reject them up front.
    if isinstance(output_type, FSM):
        raise NotImplementedError(
            "FSM generation is not supported for API-based models"
        )

stream(prompt, **inference_kwargs)

Generate a stream of responses from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
def stream(self, prompt: Any, **inference_kwargs) -> Iterator[Any]:
    """Generate a stream of response chunks from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    Iterator[Any]
        An iterator over the chunks produced by the model.

    """
    chunk_iterator = self.model.generate_stream(
        prompt, self.output_type, **inference_kwargs
    )
    return chunk_iterator

SteerableGenerator

Represents a generator for which we control constrained generation.

The generator is responsible for building and storing the logits processor (which can be quite expensive to build), and then passing it to the model when the generator is called.

The argument defining constrained generation can be one of two types, each associated with a different method for creating an instance of the generator: output_type (through __init__), an output type as defined in the outlines.types module; or processor (through from_processor), an already-built logits processor as defined in the outlines.processors module.

The 2 parameters are mutually exclusive.

Source code in outlines/generator.py
class SteerableGenerator:
    """Generator for models whose constrained generation we control.

    Builds and caches the logits processor — which can be expensive to
    construct — and forwards it to the model on every call.

    Constrained generation can be configured in two mutually exclusive ways,
    each tied to its own constructor:
    - `output_type` (through `__init__`): an output type as defined in the
      `outlines.types` module
    - `processor` (through `from_processor`): an already built logits processor
       as defined in the `outlines.processors` module

    """
    logits_processor: Optional[OutlinesLogitsProcessor]

    def __init__(self, model: SteerableModel, output_type: Optional[Any]):
        """
        Parameters
        ----------
        model
            An instance of an Outlines model.
        output_type
            The output type expressed as a Python type

        """
        self.model = model
        if output_type is None:
            # Unconstrained generation: nothing to compile.
            self.logits_processor = None
            return

        term = python_types_to_terms(output_type)
        tokenizer = model.tokenizer
        tensor_library = model.tensor_library_name
        if isinstance(term, CFG):
            # Grammar-constrained generation from the CFG definition string.
            self.logits_processor = CFGLogitsProcessor(
                term.definition, tokenizer, tensor_library
            )
        elif isinstance(term, FSM):
            # FSM terms are first converted into a regex guide.
            guide = RegexGuide.from_interegular_fsm(term.fsm, tokenizer)
            self.logits_processor = GuideLogitsProcessor(
                tokenizer, guide, tensor_library
            )
        else:
            # Every other term compiles down to a regular expression.
            self.logits_processor = RegexLogitsProcessor(
                to_regex(term), tokenizer, tensor_library
            )

    @classmethod
    def from_processor(
        cls, model: SteerableModel, processor: OutlinesLogitsProcessor
    ):
        """Create a generator directly from a logits processor.

        Parameters
        ----------
        model
            An instance of an Outlines model.
        processor
            An instance of an OutlinesLogitsProcessor.

        """
        if not isinstance(processor, OutlinesLogitsProcessor):
            raise TypeError(
                "The processor argument must be an instance of "
                "OutlinesLogitsProcessor"
            )
        # Bypass __init__: the processor is already built, so there is
        # nothing to compile from an output type.
        generator = cls.__new__(cls)
        generator.model = model
        generator.logits_processor = processor
        return generator

    def __call__(self, prompt: Any, **inference_kwargs) -> Any:
        """Generate a single response from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        Any
            The response generated by the model.

        """
        return self.model.generate(
            prompt, self.logits_processor, **inference_kwargs
        )

    def stream(self, prompt: Any, **inference_kwargs) -> Iterator[Any]:
        """Generate a stream of response chunks from the model.

        Parameters
        ----------
        prompt
            The prompt to use to generate a response.
        **inference_kwargs
            Additional keyword arguments to pass to the model.

        Returns
        -------
        Iterator[Any]
            An iterator over the chunks produced by the model.

        """
        return self.model.generate_stream(
            prompt, self.logits_processor, **inference_kwargs
        )

__call__(prompt, **inference_kwargs)

Generate a response from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
def __call__(self, prompt: Any, **inference_kwargs) -> Any:
    """Generate a single response from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    Any
        The response generated by the model.

    """
    response = self.model.generate(
        prompt, self.logits_processor, **inference_kwargs
    )
    return response

__init__(model, output_type)

Parameters:

Name Type Description Default
model SteerableModel

An instance of an Outlines model.

required
output_type Optional[Any]

The output type expressed as a Python type

required
Source code in outlines/generator.py
def __init__(self, model: SteerableModel, output_type: Optional[Any]):
    """
    Parameters
    ----------
    model
        An instance of an Outlines model.
    output_type
        The output type expressed as a Python type

    """
    self.model = model
    if output_type is None:
        # Unconstrained generation: nothing to compile.
        self.logits_processor = None
        return

    term = python_types_to_terms(output_type)
    tokenizer = model.tokenizer
    tensor_library = model.tensor_library_name
    if isinstance(term, CFG):
        # Grammar-constrained generation from the CFG definition string.
        self.logits_processor = CFGLogitsProcessor(
            term.definition, tokenizer, tensor_library
        )
    elif isinstance(term, FSM):
        # FSM terms are first converted into a regex guide.
        guide = RegexGuide.from_interegular_fsm(term.fsm, tokenizer)
        self.logits_processor = GuideLogitsProcessor(
            tokenizer, guide, tensor_library
        )
    else:
        # Every other term compiles down to a regular expression.
        self.logits_processor = RegexLogitsProcessor(
            to_regex(term), tokenizer, tensor_library
        )

from_processor(model, processor) classmethod

Create a generator from a logits processor.

Parameters:

Name Type Description Default
model SteerableModel

An instance of an Outlines model.

required
processor OutlinesLogitsProcessor

An instance of an OutlinesLogitsProcessor.

required
Source code in outlines/generator.py
@classmethod
def from_processor(
    cls, model: SteerableModel, processor: OutlinesLogitsProcessor
):
    """Create a generator directly from a logits processor.

    Parameters
    ----------
    model
        An instance of an Outlines model.
    processor
        An instance of an OutlinesLogitsProcessor.

    """
    if not isinstance(processor, OutlinesLogitsProcessor):
        raise TypeError(
            "The processor argument must be an instance of "
            "OutlinesLogitsProcessor"
        )
    # Bypass __init__: the processor is already built, so there is
    # nothing to compile from an output type.
    generator = cls.__new__(cls)
    generator.model = model
    generator.logits_processor = processor
    return generator

stream(prompt, **inference_kwargs)

Generate a stream of responses from the model.

Parameters:

Name Type Description Default
prompt Any

The prompt to use to generate a response.

required
**inference_kwargs

Additional keyword arguments to pass to the model.

{}

Returns:

Type Description
Any

The response generated by the model.

Source code in outlines/generator.py
def stream(self, prompt: Any, **inference_kwargs) -> Iterator[Any]:
    """Generate a stream of response chunks from the model.

    Parameters
    ----------
    prompt
        The prompt to use to generate a response.
    **inference_kwargs
        Additional keyword arguments to pass to the model.

    Returns
    -------
    Iterator[Any]
        An iterator over the chunks produced by the model.

    """
    chunk_iterator = self.model.generate_stream(
        prompt, self.logits_processor, **inference_kwargs
    )
    return chunk_iterator

Generator(model, output_type=None, *, processor=None)

Create a generator for the given model and output parameters.

The two parameters output_type and processor are mutually exclusive. The processor parameter is only supported for SteerableModel instances (typically local models) and is intended to be used only by advanced users.

Parameters:

Name Type Description Default
model Union[Model, AsyncModel]

An instance of an Outlines model.

required
output_type Optional[Any]

The output type expressed as a Python type or a type defined in the outlines.types.dsl module.

None
processor Optional[OutlinesLogitsProcessor]

An instance of an OutlinesLogitsProcessor.

None

Returns:

Type Description
Union[SteerableGenerator, BlackBoxGenerator, AsyncBlackBoxGenerator]

A generator instance.

Source code in outlines/generator.py
def Generator(
    model: Union[Model, AsyncModel],
    output_type: Optional[Any] = None,
    *,
    processor: Optional[OutlinesLogitsProcessor] = None,
) -> Union[SteerableGenerator, BlackBoxGenerator, AsyncBlackBoxGenerator]:
    """Create a generator for the given model and output parameters.

    The parameters output_type and processor are mutually exclusive. The
    processor parameter is only supported for SteerableModel instances
    (typically local models) and is intended for advanced users only.

    Parameters
    ----------
    model
        An instance of an Outlines model.
    output_type
        The output type expressed as a Python type or a type defined in the
        outlines.types.dsl module.
    processor
        An instance of an OutlinesLogitsProcessor.

    Returns
    -------
    Union[SteerableGenerator, BlackBoxGenerator, AsyncBlackBoxGenerator]
        A generator instance.

    """
    # output_type and processor are mutually exclusive.
    if output_type is not None and processor is not None:
        raise ValueError(
            "At most one of output_type or processor can be provided"
        )

    if isinstance(model, SteerableModel): # type: ignore
        # Local models: wrap a prebuilt processor if given, otherwise
        # compile one from the output type.
        if processor is not None:
            return SteerableGenerator.from_processor(model, processor) # type: ignore
        return SteerableGenerator(model, output_type) # type: ignore

    # Black-box (API-based) models cannot take a logits processor.
    if processor is not None:
        raise NotImplementedError(
            "This model does not support logits processors"
        )
    if isinstance(model, AsyncBlackBoxModel): # type: ignore
        return AsyncBlackBoxGenerator(model, output_type) # type: ignore
    if isinstance(model, BlackBoxModel): # type: ignore
        return BlackBoxGenerator(model, output_type) # type: ignore
    raise ValueError(
        "The model argument must be an instance of "
        "SteerableModel, BlackBoxModel or AsyncBlackBoxModel"
    )