forked from ogx-ai/ogx-client-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathopenai.py
More file actions
152 lines (122 loc) · 5.4 KB
/
openai.py
File metadata and controls
152 lines (122 loc) · 5.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Type, cast
import httpx
from ..._types import Body, Query, Headers, NotGiven, not_given
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
from ..._wrappers import DataWrapper
from ..._base_client import make_request_options
from ...types.model_list_response import ModelListResponse
__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
class OpenAIResource(SyncAPIResource):
    """Synchronous accessor for the OpenAI-compatible model endpoints."""

    @cached_property
    def with_raw_response(self) -> OpenAIResourceWithRawResponse:
        """Return a view of this resource whose calls yield the raw HTTP response
        object instead of the parsed content.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
        """
        return OpenAIResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
        """
        return OpenAIResourceWithStreamingResponse(self)

    def list(
        self,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModelListResponse:
        """List models using the OpenAI API.

        The ``extra_*`` arguments forward additional headers, query parameters,
        or body fields to the API; values given here take precedence over values
        defined on the client. ``timeout`` overrides the client-level timeout
        for this request only.
        """
        # Build the per-request options once; the post_parser strips the
        # enclosing `data` envelope from the server payload.
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            post_parser=DataWrapper[ModelListResponse]._unwrapper,
        )
        return self._get(
            "/v1/models",
            options=request_options,
            cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
        )
class AsyncOpenAIResource(AsyncAPIResource):
    """Asynchronous accessor for the OpenAI-compatible model endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
        """Return a view of this resource whose calls yield the raw HTTP response
        object instead of the parsed content.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
        """
        return AsyncOpenAIResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
        """
        return AsyncOpenAIResourceWithStreamingResponse(self)

    async def list(
        self,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModelListResponse:
        """List models using the OpenAI API.

        The ``extra_*`` arguments forward additional headers, query parameters,
        or body fields to the API; values given here take precedence over values
        defined on the client. ``timeout`` overrides the client-level timeout
        for this request only.
        """
        # Build the per-request options once; the post_parser strips the
        # enclosing `data` envelope from the server payload.
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            post_parser=DataWrapper[ModelListResponse]._unwrapper,
        )
        return await self._get(
            "/v1/models",
            options=request_options,
            cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
        )
class OpenAIResourceWithRawResponse:
    """View of `OpenAIResource` whose methods return the raw HTTP response."""

    def __init__(self, openai: OpenAIResource) -> None:
        self._openai = openai
        # Each resource method is wrapped so callers get the raw response.
        self.list = to_raw_response_wrapper(openai.list)
class AsyncOpenAIResourceWithRawResponse:
    """View of `AsyncOpenAIResource` whose methods return the raw HTTP response."""

    def __init__(self, openai: AsyncOpenAIResource) -> None:
        self._openai = openai
        # Each resource method is wrapped so callers get the raw response.
        self.list = async_to_raw_response_wrapper(openai.list)
class OpenAIResourceWithStreamingResponse:
    """View of `OpenAIResource` whose methods stream the response body."""

    def __init__(self, openai: OpenAIResource) -> None:
        self._openai = openai
        # Each resource method is wrapped so the body is not eagerly read.
        self.list = to_streamed_response_wrapper(openai.list)
class AsyncOpenAIResourceWithStreamingResponse:
    """View of `AsyncOpenAIResource` whose methods stream the response body."""

    def __init__(self, openai: AsyncOpenAIResource) -> None:
        self._openai = openai
        # Each resource method is wrapped so the body is not eagerly read.
        self.list = async_to_streamed_response_wrapper(openai.list)