Skip to content

Commit 21a26f6

Browse files
committed
Update (docs/ README.md g4f/client/client.py)
1 parent 2a29f1b commit 21a26f6

File tree

7 files changed

+34
-19
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ from g4f.client import Client
174174

175175
client = Client()
176176
response = client.chat.completions.create(
177-
model="gpt-3.5-turbo",
177+
model="gpt-4o-mini",
178178
messages=[{"role": "user", "content": "Hello"}],
179179
# Add any other necessary parameters
180180
)

docs/async_client.md

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ client = Client(
5757
**Here’s an improved example of creating chat completions:**
5858
```python
5959
response = await async_client.chat.completions.create(
60-
model="gpt-3.5-turbo",
60+
model="gpt-4o-mini",
6161
messages=[
6262
{
6363
"role": "user",
@@ -99,7 +99,7 @@ async def main():
9999
client = Client()
100100

101101
response = await client.chat.completions.async_create(
102-
model="gpt-3.5-turbo",
102+
model="gpt-4o-mini",
103103
messages=[
104104
{
105105
"role": "user",
@@ -230,7 +230,7 @@ async def main():
230230
client = Client()
231231

232232
task1 = client.chat.completions.async_create(
233-
model="gpt-3.5-turbo",
233+
model="gpt-4o-mini",
234234
messages=[
235235
{
236236
"role": "user",
@@ -262,6 +262,7 @@ The G4F AsyncClient supports a wide range of AI models and providers, allowing y
262262

263263
### Models
264264
- GPT-3.5-Turbo
265+
- GPT-4o-Mini
265266
- GPT-4
266267
- DALL-E 3
267268
- Gemini
@@ -306,7 +307,7 @@ Implementing proper error handling and following best practices is crucial when
306307
```python
307308
try:
308309
response = await client.chat.completions.async_create(
309-
model="gpt-3.5-turbo",
310+
model="gpt-4o-mini",
310311
messages=[
311312
{
312313
"role": "user",

docs/client.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ client = Client(
6262
**Here’s an improved example of creating chat completions:**
6363
```python
6464
response = client.chat.completions.create(
65-
model="gpt-3.5-turbo",
65+
model="gpt-4o-mini",
6666
messages=[
6767
{
6868
"role": "user",
@@ -104,7 +104,7 @@ from g4f.client import Client
104104
client = Client()
105105

106106
response = client.chat.completions.create(
107-
model="gpt-3.5-turbo",
107+
model="gpt-4o-mini",
108108
messages=[
109109
{
110110
"role": "user",

docs/docker.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ import requests
7171

7272
url = "http://localhost:1337/v1/chat/completions"
7373
body = {
74-
"model": "gpt-3.5-turbo",
74+
"model": "gpt-4o-mini",
7575
"stream": False,
7676
"messages": [
7777
{"role": "assistant", "content": "What can you do?"}

docs/git.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ from g4f.client import Client
9595
client = Client()
9696

9797
response = client.chat.completions.create(
98-
model="gpt-3.5-turbo",
98+
model="gpt-4o-mini",
9999
messages=[
100100
{
101101
"role": "user",

docs/interference-api.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ curl -X POST "http://localhost:1337/v1/chat/completions" \
6464
"content": "Hello"
6565
}
6666
],
67-
"model": "gpt-3.5-turbo"
67+
"model": "gpt-4o-mini"
6868
}'
6969
```
7070

@@ -104,7 +104,7 @@ client = OpenAI(
104104
)
105105

106106
response = client.chat.completions.create(
107-
model="gpt-3.5-turbo",
107+
model="gpt-4o-mini",
108108
messages=[{"role": "user", "content": "Write a poem about a tree"}],
109109
stream=True,
110110
)
@@ -131,7 +131,7 @@ import requests
131131
url = "http://localhost:1337/v1/chat/completions"
132132

133133
body = {
134-
"model": "gpt-3.5-turbo",
134+
"model": "gpt-4o-mini",
135135
"stream": False,
136136
"messages": [
137137
{"role": "assistant", "content": "What can you do?"}

g4f/client/client.py

Lines changed: 21 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -154,14 +154,29 @@ def __init__(self, *args, **kwargs):
154154
stacklevel=2
155155
)
156156
super().__init__(*args, **kwargs)
157+
self.chat = Chat(self)
158+
self._images = Images(self)
159+
self.completions = Completions(self)
157160

158-
async def chat_complete(self, *args, **kwargs):
159-
"""Legacy method that redirects to async_create"""
160-
return await self.chat.completions.async_create(*args, **kwargs)
161+
@property
162+
def images(self) -> 'Images':
163+
return self._images
164+
165+
async def async_create(self, *args, **kwargs) -> Union['ChatCompletion', AsyncIterator['ChatCompletionChunk']]:
166+
response = await super().async_create(*args, **kwargs)
167+
async for result in response:
168+
return result
161169

162-
async def create_image(self, *args, **kwargs):
163-
"""Legacy method that redirects to async_generate"""
164-
return await self.images.async_generate(*args, **kwargs)
170+
async def async_generate(self, *args, **kwargs) -> 'ImagesResponse':
171+
return await super().async_generate(*args, **kwargs)
172+
173+
async def _fetch_image(self, url: str) -> bytes:
174+
async with ClientSession() as session:
175+
async with session.get(url) as resp:
176+
if resp.status == 200:
177+
return await resp.read()
178+
else:
179+
raise Exception(f"Failed to fetch image from {url}, status code {resp.status}")
165180

166181
class Completions:
167182
def __init__(self, client: Client, provider: ProviderType = None):
@@ -531,4 +546,3 @@ def _save_image(self, image: 'PILImage') -> str:
531546
async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
532547
# Existing implementation, adjust if you want to support b64_json here as well
533548
pass
534-

Comments: 0