
openaivec.responses

AsyncBatchResponses dataclass

Bases: Generic[T]

Stateless façade that turns OpenAI's JSON-mode API into a batched API (Async version).

This wrapper allows you to submit multiple user prompts in one JSON-mode request and receive the answers in the original order asynchronously. It also controls the maximum number of concurrent requests to the OpenAI API.

Example

```python
import asyncio
from openai import AsyncOpenAI
from openaivec.responses import AsyncBatchResponses

# Initialize an AsyncOpenAI client (reads OPENAI_API_KEY from the environment)
openai_async_client = AsyncOpenAI()

vector_llm = AsyncBatchResponses(
    client=openai_async_client,
    model_name="gpt-4o-mini",
    system_message="You are a helpful assistant.",
    max_concurrency=5,  # limit concurrent requests
)
questions = ["What is the capital of France?", "Explain quantum physics simply."]

async def main():
    answers = await vector_llm.parse(questions, batch_size=32)
    print(answers)

asyncio.run(main())
```
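Because `response_format` accepts a Pydantic model (see the attributes below), each answer can be parsed into structured data rather than plain text. A minimal sketch, where the `Capital` model is a hypothetical schema invented for illustration:

```python
import asyncio
from openai import AsyncOpenAI
from pydantic import BaseModel
from openaivec.responses import AsyncBatchResponses

# Hypothetical response schema, for illustration only
class Capital(BaseModel):
    country: str
    capital: str

structured_llm = AsyncBatchResponses(
    client=AsyncOpenAI(),
    model_name="gpt-4o-mini",
    system_message="Return the capital city of the given country.",
    response_format=Capital,  # each assistant message is parsed into a Capital
)

async def main():
    results = await structured_llm.parse(["France", "Japan"], batch_size=32)
    print(results[0].capital)  # e.g. "Paris"

asyncio.run(main())
```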

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `client` | `AsyncOpenAI` | Initialised `openai.AsyncOpenAI` client. |
| `model_name` | `str` | Name of the model (or Azure deployment) to invoke. |
| `system_message` | `str` | System prompt prepended to every request. |
| `temperature` | `float` | Sampling temperature passed to the model. |
| `top_p` | `float` | Nucleus-sampling parameter. |
| `response_format` | `Type[T]` | Expected Pydantic type of each assistant message (defaults to `str`). |
| `max_concurrency` | `int` | Maximum number of concurrent requests to the OpenAI API. |
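Together, `max_concurrency` and the `batch_size` argument of `parse` determine the request fan-out: unique prompts are grouped into minibatches of at most `batch_size`, and at most `max_concurrency` of the resulting API calls run at once. A back-of-the-envelope sketch of that arithmetic (assuming, as documented for `parse`, that de-duplication happens first):

```python
import math

unique_prompts = 1000  # prompts remaining after de-duplication
batch_size = 32        # unique prompts per LLM call (argument to parse)
max_concurrency = 5    # cap on concurrent requests (constructor argument)

total_requests = math.ceil(unique_prompts / batch_size)  # 32 API calls in total
in_flight = min(total_requests, max_concurrency)         # at most 5 at any moment
print(total_requests, in_flight)
```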

Source code in src/openaivec/responses.py
@dataclass(frozen=True)
class AsyncBatchResponses(Generic[T]):
    """Stateless façade that turns OpenAI's JSON-mode API into a batched API (Async version).

    This wrapper allows you to submit *multiple* user prompts in one JSON-mode
    request and receive the answers in the original order asynchronously. It also
    controls the maximum number of concurrent requests to the OpenAI API.

    Example:
        ```python
        import asyncio
        from openai import AsyncOpenAI
        from openaivec.responses import AsyncBatchResponses

        # Assuming openai_async_client is an initialized AsyncOpenAI client
        openai_async_client = AsyncOpenAI() # Replace with your actual client initialization

        vector_llm = AsyncBatchResponses(
            client=openai_async_client,
            model_name="gpt-4o-mini",
            system_message="You are a helpful assistant.",
            max_concurrency=5  # Limit concurrent requests
        )
        questions = ["What is the capital of France?", "Explain quantum physics simply."]
        # Asynchronous call
        async def main():
            answers = await vector_llm.parse(questions, batch_size=32)
            print(answers)

        # Run the async function
        asyncio.run(main())
        ```

    Attributes:
        client: Initialised `openai.AsyncOpenAI` client.
        model_name: Name of the model (or Azure deployment) to invoke.
        system_message: System prompt prepended to every request.
        temperature: Sampling temperature passed to the model.
        top_p: Nucleus-sampling parameter.
        response_format: Expected Pydantic type of each assistant message
            (defaults to `str`).
        max_concurrency: Maximum number of concurrent requests to the OpenAI API.
    """

    client: AsyncOpenAI
    model_name: str  # for Azure, this is the deployment name
    system_message: str
    temperature: float = 0.0
    top_p: float = 1.0
    response_format: Type[T] = str
    max_concurrency: int = 8  # Default concurrency limit
    _vectorized_system_message: str = field(init=False)
    _model_json_schema: dict = field(init=False)
    _semaphore: asyncio.Semaphore = field(init=False, repr=False)

    def __post_init__(self):
        object.__setattr__(
            self,
            "_vectorized_system_message",
            _vectorize_system_message(self.system_message),
        )
        # Initialize the semaphore after the object is created
        # Use object.__setattr__ because the dataclass is frozen
        object.__setattr__(self, "_semaphore", asyncio.Semaphore(self.max_concurrency))

    @observe(_LOGGER)
    @backoff_async(exception=RateLimitError, scale=15, max_retries=8)
    async def _request_llm(self, user_messages: List[Message[str]]) -> ParsedResponse[Response[T]]:
        """Make a single async call to the OpenAI *JSON mode* endpoint, respecting concurrency limits.

        Args:
            user_messages: Sequence of `Message[str]` objects representing the
                prompts for this minibatch. Each message carries a unique `id`
                so we can restore ordering later.

        Returns:
            ParsedResponse containing `Response[T]` which in turn holds the
            assistant messages in arbitrary order.

        Raises:
            openai.RateLimitError: Transparently re-raised after the
                exponential back-off decorator exhausts all retries.
        """
        response_format = self.response_format

        class MessageT(BaseModel):
            id: int
            body: response_format  # type: ignore

        class ResponseT(BaseModel):
            assistant_messages: List[MessageT]

        # Acquire semaphore before making the API call
        async with self._semaphore:
            # Directly await the async call instead of using asyncio.run()
            completion: ParsedResponse[ResponseT] = await self.client.responses.parse(
                model=self.model_name,
                instructions=self._vectorized_system_message,
                input=Request(user_messages=user_messages).model_dump_json(),
                temperature=self.temperature,
                top_p=self.top_p,
                text_format=ResponseT,
            )
            return cast(ParsedResponse[Response[T]], completion)

    @observe(_LOGGER)
    async def _predict_chunk(self, user_messages: List[str]) -> List[T]:
        """Helper executed asynchronously for every unique minibatch.

        This method:
        1. Converts plain strings into `Message[str]` with stable indices.
        2. Delegates the request to `_request_llm`.
        3. Reorders the responses so they match the original indices.

        The function is *pure* – it has no side-effects and the result depends
        only on its arguments.
        """
        messages = [Message(id=i, body=message) for i, message in enumerate(user_messages)]
        responses: ParsedResponse[Response[T]] = await self._request_llm(messages)
        response_dict = {message.id: message.body for message in responses.output_parsed.assistant_messages}
        # Ensure None is returned for missing IDs, though this shouldn't happen in normal operation
        sorted_responses = [response_dict.get(m.id) for m in messages]
        return sorted_responses

    @observe(_LOGGER)
    async def parse(self, inputs: List[str], batch_size: int) -> List[T]:
        """Asynchronous public API: batched predict.

        Args:
            inputs: All prompts that require a response. Duplicate
                entries are de-duplicated under the hood to save tokens.
            batch_size: Maximum number of *unique* prompts per LLM call.

        Returns:
            A list containing the assistant responses in the same order as
                *inputs*.
        """

        return await map_async(
            inputs=inputs,
            f=self._predict_chunk,
            batch_size=batch_size,  # Use the batch_size argument passed to the method
        )

parse(inputs, batch_size) async

Asynchronous public API: batched predict.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `inputs` | `List[str]` | All prompts that require a response. Duplicate entries are de-duplicated under the hood to save tokens. | required |
| `batch_size` | `int` | Maximum number of *unique* prompts per LLM call. | required |

Returns:

| Type | Description |
| --- | --- |
| `List[T]` | A list containing the assistant responses in the same order as *inputs*. |
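In practice the de-duplication is invisible to the caller: repeated prompts are billed once, yet the result list always lines up index-for-index with the input. A short usage sketch, continuing the first example above (and assuming, as the de-duplication note implies, that identical prompts share a single answer):

```python
async def dedup_demo():
    prompts = ["What is 2 + 2?", "Name a prime number.", "What is 2 + 2?"]
    # The duplicate prompt is sent to the API only once, but the result
    # list still contains one entry per input, in the original order.
    answers = await vector_llm.parse(prompts, batch_size=2)
    assert len(answers) == len(prompts)
    assert answers[0] == answers[2]  # duplicates share one answer

asyncio.run(dedup_demo())
```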

Source code in src/openaivec/responses.py
@observe(_LOGGER)
async def parse(self, inputs: List[str], batch_size: int) -> List[T]:
    """Asynchronous public API: batched predict.

    Args:
        inputs: All prompts that require a response. Duplicate
            entries are de-duplicated under the hood to save tokens.
        batch_size: Maximum number of *unique* prompts per LLM call.

    Returns:
        A list containing the assistant responses in the same order as
            *inputs*.
    """

    return await map_async(
        inputs=inputs,
        f=self._predict_chunk,
        batch_size=batch_size,  # Use the batch_size argument passed to the method
    )

BatchResponses dataclass

Bases: Generic[T]

Stateless façade that turns OpenAI's JSON-mode API into a batched API.

This wrapper allows you to submit multiple user prompts in one JSON-mode request and receive the answers in the original order.

Example

```python
from openai import OpenAI
from openaivec.responses import BatchResponses

openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment

vector_llm = BatchResponses(
    client=openai_client,
    model_name="gpt-4o-mini",
    system_message="You are a helpful assistant.",
)
questions = ["What is the capital of France?", "Explain quantum physics simply."]
answers = vector_llm.parse(questions, batch_size=32)
```

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `client` | `OpenAI` | Initialised `openai.OpenAI` client. |
| `model_name` | `str` | Name of the model (or Azure deployment) to invoke. |
| `system_message` | `str` | System prompt prepended to every request. |
| `temperature` | `float` | Sampling temperature passed to the model. |
| `top_p` | `float` | Nucleus-sampling parameter. |
| `response_format` | `Type[T]` | Expected Pydantic type of each assistant message (defaults to `str`). |

Notes

Internally the work is delegated to two helpers:

  • _predict_chunk – fragments the workload and restores ordering.
  • _request_llm – performs a single OpenAI API call.
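As with the async variant, setting `response_format` to a Pydantic model makes `parse` return parsed instances instead of strings. A minimal sketch, where `Sentiment` is a hypothetical schema invented for illustration:

```python
from openai import OpenAI
from pydantic import BaseModel
from openaivec.responses import BatchResponses

# Hypothetical response schema, for illustration only
class Sentiment(BaseModel):
    label: str   # e.g. "positive" or "negative"
    score: float

sentiment_llm = BatchResponses(
    client=OpenAI(),
    model_name="gpt-4o-mini",
    system_message="Classify the sentiment of the given text.",
    response_format=Sentiment,
)

results = sentiment_llm.parse(["I love this!", "This is awful."], batch_size=32)
print(results[0].label, results[0].score)
```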
Source code in src/openaivec/responses.py
@dataclass(frozen=True)
class BatchResponses(Generic[T]):
    """Stateless façade that turns OpenAI's JSON‑mode API into a batched API.

    This wrapper allows you to submit *multiple* user prompts in one JSON‑mode
    request and receive the answers in the original order.

    Example:
        ```python
        vector_llm = BatchResponses(
            client=openai_client,
            model_name="gpt‑4o‑mini",
            system_message="You are a helpful assistant."
        )
        answers = vector_llm.parse(questions, batch_size=32)
        ```

    Attributes:
        client: Initialised ``openai.OpenAI`` client.
        model_name: Name of the model (or Azure deployment) to invoke.
        system_message: System prompt prepended to every request.
        temperature: Sampling temperature passed to the model.
        top_p: Nucleus‑sampling parameter.
        response_format: Expected Pydantic type of each assistant message
            (defaults to ``str``).

    Notes:
        Internally the work is delegated to two helpers:

        * ``_predict_chunk`` – fragments the workload and restores ordering.
        * ``_request_llm`` – performs a single OpenAI API call.
    """

    client: OpenAI
    model_name: str  # for Azure, this is the deployment name
    system_message: str
    temperature: float = 0.0
    top_p: float = 1.0
    response_format: Type[T] = str
    _vectorized_system_message: str = field(init=False)
    _model_json_schema: dict = field(init=False)

    def __post_init__(self):
        object.__setattr__(
            self,
            "_vectorized_system_message",
            _vectorize_system_message(self.system_message),
        )

    @observe(_LOGGER)
    @backoff(exception=RateLimitError, scale=15, max_retries=8)
    def _request_llm(self, user_messages: List[Message[str]]) -> ParsedResponse[Response[T]]:
        """Make a single call to the OpenAI *JSON mode* endpoint.

        Args:
            user_messages: Sequence of `Message[str]` objects representing the
                prompts for this minibatch.  Each message carries a unique `id`
                so we can restore ordering later.

        Returns:
            ParsedResponse containing `Response[T]` which in turn holds the
            assistant messages in arbitrary order.

        Raises:
            openai.RateLimitError: Transparently re‑raised after the
                exponential back‑off decorator exhausts all retries.
        """
        response_format = self.response_format

        class MessageT(BaseModel):
            id: int
            body: response_format  # type: ignore

        class ResponseT(BaseModel):
            assistant_messages: List[MessageT]

        completion: ParsedResponse[ResponseT] = self.client.responses.parse(
            model=self.model_name,
            instructions=self._vectorized_system_message,
            input=Request(user_messages=user_messages).model_dump_json(),
            temperature=self.temperature,
            top_p=self.top_p,
            text_format=ResponseT,
        )
        return cast(ParsedResponse[Response[T]], completion)

    @observe(_LOGGER)
    def _predict_chunk(self, user_messages: List[str]) -> List[T]:
        """Helper executed for every unique minibatch.

        This method:
        1. Converts plain strings into `Message[str]` with stable indices.
        2. Delegates the request to `_request_llm`.
        3. Reorders the responses so they match the original indices.

        The function is *pure* – it has no side‑effects and the result depends
        only on its arguments – which allows it to be used safely in both
        serial and parallel execution paths.
        """
        messages = [Message(id=i, body=message) for i, message in enumerate(user_messages)]
        responses: ParsedResponse[Response[T]] = self._request_llm(messages)
        response_dict = {message.id: message.body for message in responses.output_parsed.assistant_messages}
        sorted_responses = [response_dict.get(m.id, None) for m in messages]
        return sorted_responses

    @observe(_LOGGER)
    def parse(self, inputs: List[str], batch_size: int) -> List[T]:
        """Public API: batched predict.

        Args:
            inputs: All prompts that require a response.  Duplicate
                entries are de‑duplicated under the hood to save tokens.
            batch_size: Maximum number of *unique* prompts per LLM call.

        Returns:
            A list containing the assistant responses in the same order as
                *inputs*.
        """
        return map(inputs, self._predict_chunk, batch_size)

parse(inputs, batch_size)

Public API: batched predict.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `inputs` | `List[str]` | All prompts that require a response. Duplicate entries are de-duplicated under the hood to save tokens. | required |
| `batch_size` | `int` | Maximum number of *unique* prompts per LLM call. | required |

Returns:

| Type | Description |
| --- | --- |
| `List[T]` | A list containing the assistant responses in the same order as *inputs*. |

Source code in src/openaivec/responses.py
@observe(_LOGGER)
def parse(self, inputs: List[str], batch_size: int) -> List[T]:
    """Public API: batched predict.

    Args:
        inputs: All prompts that require a response.  Duplicate
            entries are de‑duplicated under the hood to save tokens.
        batch_size: Maximum number of *unique* prompts per LLM call.

    Returns:
        A list containing the assistant responses in the same order as
            *inputs*.
    """
    return map(inputs, self._predict_chunk, batch_size)