move: some random python packages to blshkv overlay

This commit is contained in:
Anton Bolshakov 2024-07-13 10:30:40 +08:00
parent 442022709c
commit fcc99173d7
No known key found for this signature in database
GPG key ID: 32BDCED870788F04
22 changed files with 0 additions and 2142 deletions

View file

@@ -1,2 +0,0 @@
DIST aiogram-3.7.0.tar.gz 1304350 BLAKE2B 478c8efdf46397bf1d11cddf641874d74ddfa8a4bf80884101342fcde76d138d78ff728e6811d73b9abc489ceefe69edaeeeee15605e11031ce15fc27ee692ac SHA512 ae53bc3fc8c3053771a73c6258cf3d2928e3e9cb3cc76fcb3f2f94a1c9a2c3e7204f0c1f1e03735e2949ea44b050d4f5fb593a11e8d94ba7c9d5dc4cdf3d6539
DIST aiogram-3.8.0.tar.gz 1316993 BLAKE2B 25a69154494125c8305a626eca193b38e2e59cce8e987b1a4bba478ea2a43c7ff23820563ebadbe12b5b6345d32d8d1f48e266a23162e0afb4b8f0232e8595bf SHA512 de603270a5916b9600fe2c72d17988aea24f01d50ecb301caa8cf91a0a8b13577e27c63fae168bba0755a40a45848aa5976e5fcb6b7f99202f08252f65c2ef43

View file

@@ -1,60 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_USE_PEP517=hatchling
PYTHON_COMPAT=( python3_{10..12} )
inherit distutils-r1 pypi
DESCRIPTION="Asynchronous framework for Telegram Bot API"
HOMEPAGE="https://github.com/aiogram/aiogram"
LICENSE="MIT"
SLOT="0"
KEYWORDS="amd64 ~arm64 ~x86"
IUSE="docs i18n fast proxy redis"
RESTRICT="test"
RDEPEND="
	>=dev-python/magic-filter-1.0.12[${PYTHON_USEDEP}]
	>=dev-python/aiohttp-3.9.0[${PYTHON_USEDEP}]
	>=dev-python/pydantic-2.4.1[${PYTHON_USEDEP}] <dev-python/pydantic-2.8
	>=dev-python/aiofiles-23.2.1[${PYTHON_USEDEP}]
	>=dev-python/certifi-2023.7.22[${PYTHON_USEDEP}]
	>=dev-python/typing-extensions-4.7.0[${PYTHON_USEDEP}]
	fast? (
		>=dev-python/uvloop-0.17.0[${PYTHON_USEDEP}]
		>=dev-python/aiodns-3.0.0[${PYTHON_USEDEP}]
	)
	i18n? ( >=dev-python/Babel-2.13.0[${PYTHON_USEDEP}] )
	proxy? ( >=dev-python/aiohttp-socks-0.8.3[${PYTHON_USEDEP}] )
	redis? (
		>=dev-python/redis-5.0.1[${PYTHON_USEDEP}]
		dev-python/hiredis[${PYTHON_USEDEP}]
	)
	docs? (
		dev-python/sphinx
		dev-python/furo
		dev-python/sphinx-prompt
		dev-python/towncrier
		dev-python/pymdown-extensions
		dev-python/pygments
	)
"
DEPEND="${RDEPEND}"
#BDEPEND="
# test? (
# dev-python/redis[${PYTHON_USEDEP}]
# dev-python/magic-filter[${PYTHON_USEDEP}]
# dev-python/aiofiles[${PYTHON_USEDEP}]
# dev-python/aiohttp[${PYTHON_USEDEP}]
# dev-python/aresponses[${PYTHON_USEDEP}]
# dev-python/aiohttp-socks[${PYTHON_USEDEP}]
# dev-python/pytest-lazy-fixture
# )
#"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"
# RuntimeError: Found locale 'en' but this language is not compiled!
#distutils_enable_tests pytest

View file

@@ -1,60 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_USE_PEP517=hatchling
PYTHON_COMPAT=( python3_{10..12} )
inherit distutils-r1 pypi
DESCRIPTION="Asynchronous framework for Telegram Bot API"
HOMEPAGE="https://github.com/aiogram/aiogram"
LICENSE="MIT"
SLOT="0"
KEYWORDS="amd64 ~arm64 ~x86"
IUSE="docs i18n fast proxy redis"
RESTRICT="test"
RDEPEND="
	>=dev-python/magic-filter-1.0.12[${PYTHON_USEDEP}]
	>=dev-python/aiohttp-3.9.0[${PYTHON_USEDEP}]
	>=dev-python/pydantic-2.4.1[${PYTHON_USEDEP}] <dev-python/pydantic-2.8
	>=dev-python/aiofiles-23.2.1[${PYTHON_USEDEP}]
	>=dev-python/certifi-2023.7.22[${PYTHON_USEDEP}]
	>=dev-python/typing-extensions-4.7.0[${PYTHON_USEDEP}]
	fast? (
		>=dev-python/uvloop-0.17.0[${PYTHON_USEDEP}]
		>=dev-python/aiodns-3.0.0[${PYTHON_USEDEP}]
	)
	i18n? ( >=dev-python/Babel-2.13.0[${PYTHON_USEDEP}] )
	proxy? ( >=dev-python/aiohttp-socks-0.8.3[${PYTHON_USEDEP}] )
	redis? (
		>=dev-python/redis-5.0.1[${PYTHON_USEDEP}]
		dev-python/hiredis[${PYTHON_USEDEP}]
	)
	docs? (
		dev-python/sphinx
		dev-python/furo
		dev-python/sphinx-prompt
		dev-python/towncrier
		dev-python/pymdown-extensions
		dev-python/pygments
	)
"
DEPEND="${RDEPEND}"
#BDEPEND="
# test? (
# dev-python/redis[${PYTHON_USEDEP}]
# dev-python/magic-filter[${PYTHON_USEDEP}]
# dev-python/aiofiles[${PYTHON_USEDEP}]
# dev-python/aiohttp[${PYTHON_USEDEP}]
# dev-python/aresponses[${PYTHON_USEDEP}]
# dev-python/aiohttp-socks[${PYTHON_USEDEP}]
# dev-python/pytest-lazy-fixture
# )
#"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"
# RuntimeError: Found locale 'en' but this language is not compiled!
#distutils_enable_tests pytest

View file

@@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
	<maintainer type="person">
		<email>unknown@pentoo.ch</email>
		<name>Author Unknown</name>
	</maintainer>
	<upstream>
		<remote-id type="github">aiogram/aiogram</remote-id>
		<remote-id type="pypi">aiogram</remote-id>
	</upstream>
	<use>
		<flag name="docs">Install documentation</flag>
		<flag name="fast">Speed up asyncio with dev-python/uvloop and dev-python/aiodns</flag>
		<flag name="i18n">Enable internationalization support via dev-python/Babel</flag>
		<flag name="proxy">Enable SOCKS proxy support via dev-python/aiohttp-socks</flag>
		<flag name="redis">Enable Redis storage support via dev-python/redis</flag>
	</use>
</pkgmetadata>

View file

@@ -1 +0,0 @@
DIST aiopg-1.4.0.gh.tar.gz 202204 BLAKE2B 9f4dda868c919aef05e58608b353d51f0526ef1f847e8da8f4f11e75264a47b9eb4d948a87d591e66baf7052c00706b8f4cb6befd6bf33668987cfbfad451afc SHA512 cbb28b0b15eb89fa3162e59e3e88dac18bddbad3d95458b5c3eb487d15767c26810e941f83c3478cac8a8a085b23723690c8d294bf6aac433a14c3defcef1138

View file

@@ -1,48 +0,0 @@
# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
PYTHON_COMPAT=( python3_{10..12} )
DISTUTILS_USE_PEP517=setuptools
inherit distutils-r1 optfeature
MY_PV=${PV/_beta/b}
DESCRIPTION="Postgres integration with asyncio"
HOMEPAGE="
	https://aiopg.readthedocs.io
	https://github.com/aio-libs/aiopg
"
SRC_URI="https://github.com/aio-libs/${PN}/archive/v${MY_PV}.tar.gz -> ${P}.gh.tar.gz"
S="${WORKDIR}/${PN}-${MY_PV}"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64 x86"
# tests use docker containers -- that's wild!
RESTRICT="test"
RDEPEND="
	dev-python/async-timeout[${PYTHON_USEDEP}]
	dev-python/psycopg:2[${PYTHON_USEDEP}]
"
DEPEND="test? (
	dev-python/pytest-timeout[${PYTHON_USEDEP}]
	dev-python/sqlalchemy[${PYTHON_USEDEP}]
)"

#DOCS=( CHANGES.txt README.rst )

distutils_enable_tests pytest
#distutils_enable_sphinx docs dev-python/sphinxcontrib-asyncio

python_test() {
	epytest --no-pull
}

pkg_postinst() {
	optfeature "sqlalchemy support" dev-python/sqlalchemy
	einfo "Note: only <dev-python/sqlalchemy-2.0.0 is supported"
}

View file

@@ -1,2 +0,0 @@
DIST openai-1.16.2.tar.gz 152136 BLAKE2B 901b71b7f8a77679cb782338460b4bbd42334206a1c3cdeb2852bc2b2cb3171578f2e0cfc7705584194f5a806947392184b4371474abeb29469ae44dd6e743c7 SHA512 e05f6011d48c8bef75f31077de2c15018768504c29494955ef7c7031cd650c1434112939811edcc2701604817c995402ec453cec2ba5303f979c194cab393f79
DIST openai-1.32.0.tar.gz 181341 BLAKE2B ae5ebb5ee57ff10242767d3e1819a9a466ddacd3dca4309b3c18cad45274adace140ba58cbb7047021d839c73d45ac8e3776d3ddcb32efbc53127f126047d67f SHA512 4b01e66b2510df9d5f8d426c76f4f44ee10fc2ca6ec21d07c475cae8bfb379a6f5296fa57455741c423a02ccbc1511ec39cf4fccef9e1912898ffcc6ed31bd96

View file

@@ -1,625 +0,0 @@
# OpenAI Python API library
[![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/)
The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.7+
application. The library includes type definitions for all request params and response fields,
and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/).
## Documentation
The REST API documentation can be found [on platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md).
## Installation
> [!IMPORTANT]
> The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code.
```sh
# install from PyPI
pip install openai
```
## Usage
The full API of this library can be found in [api.md](api.md).
```python
import os
from openai import OpenAI
client = OpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
)
```
While you can provide an `api_key` keyword argument,
we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
to add `OPENAI_API_KEY="My API Key"` to your `.env` file
so that your API Key is not stored in source control.
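For example, a minimal sketch of that setup, assuming `python-dotenv` is installed:
```python
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # reads OPENAI_API_KEY from a local .env file into os.environ
client = OpenAI()  # the client picks the key up from the environment
```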
### Polling Helpers
When interacting with the API, some actions such as starting a Run may take time to complete. The SDK includes
helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
If an API method results in an action which could benefit from polling, there will be a corresponding version of the
method ending in `_and_poll`.
For instance, to create a Run and poll until it reaches a terminal state you can run:
```python
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id,
)
```
More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
```python
with client.beta.threads.runs.stream(
    thread_id=thread.id,
    assistant_id=assistant.id,
    instructions="Please address the user as Jane Doe. The user has a premium account.",
) as stream:
    for event in stream:
        # Print the text from text delta events
        if event.type == "thread.message.delta" and event.data.delta.content:
            print(event.data.delta.content[0].text)
```
More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md)
## Async usage
Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call:
```python
import os
import asyncio
from openai import AsyncOpenAI
client = AsyncOpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)


async def main() -> None:
    chat_completion = await client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Say this is a test",
            }
        ],
        model="gpt-3.5-turbo",
    )


asyncio.run(main())
```
Functionality between the synchronous and asynchronous clients is otherwise identical.
## Streaming responses
We provide support for streaming responses using Server-Sent Events (SSE).
```python
from openai import OpenAI
client = OpenAI()
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```
The async client uses the exact same interface.
```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main():
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say this is a test"}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())
```
## Module-level client
> [!IMPORTANT]
> We highly recommend instantiating client instances instead of relying on the global client.
We also expose a global client instance that is accessible in a similar fashion to versions prior to v1.
```py
import openai
# optional; defaults to `os.environ['OPENAI_API_KEY']`
openai.api_key = '...'
# all client options can be configured just like the `OpenAI` instantiation counterpart
openai.base_url = "https://..."
openai.default_headers = {"x-foo": "true"}
completion = openai.chat.completions.create(
    model="gpt-4",
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.choices[0].message.content)
```
The API is exactly the same as the standard client-instance-based API.
This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code.
We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because:
- It can be difficult to reason about where client options are configured
- It's not possible to change certain client options without potentially causing race conditions
- It's harder to mock for testing purposes
- It's not possible to control cleanup of network connections
## Using types
Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev), which provide helper methods for things like:
- Serializing back into JSON, `model.model_dump_json(indent=2, exclude_unset=True)`
- Converting to a dictionary, `model.model_dump(exclude_unset=True)`
Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
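As a short sketch reusing the chat completion call from above:
```python
from openai import OpenAI

client = OpenAI()
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-3.5-turbo",
)

# Serialize back into JSON, or convert to a plain dict, keeping only fields
# that were actually set on the response.
print(completion.model_dump_json(indent=2, exclude_unset=True))
print(completion.model_dump(exclude_unset=True))
```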
## Pagination
List methods in the OpenAI API are paginated.
This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:
```python
from openai import OpenAI

client = OpenAI()

all_jobs = []
# Automatically fetches more pages as needed.
for job in client.fine_tuning.jobs.list(
    limit=20,
):
    # Do something with job here
    all_jobs.append(job)
print(all_jobs)
```
Or, asynchronously:
```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    all_jobs = []
    # Iterate through items across all pages, issuing requests as needed.
    async for job in client.fine_tuning.jobs.list(
        limit=20,
    ):
        all_jobs.append(job)
    print(all_jobs)


asyncio.run(main())
```
Alternatively, you can use the `.has_next_page()`, `.next_page_info()`, or `.get_next_page()` methods for more granular control when working with pages:
```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
if first_page.has_next_page():
    print(f"will fetch next page using these details: {first_page.next_page_info()}")
    next_page = await first_page.get_next_page()
    print(f"number of items we just fetched: {len(next_page.data)}")

# Remove `await` for non-async usage.
```
Or just work directly with the returned data:
```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
print(f"next page cursor: {first_page.after}")  # => "next page cursor: ..."
for job in first_page.data:
    print(job.id)

# Remove `await` for non-async usage.
```
## Nested params
Nested parameters are dictionaries, typed using `TypedDict`, for example:
```python
from openai import OpenAI
client = OpenAI()
completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-3.5-turbo-1106",
    response_format={"type": "json_object"},
)
```
## File uploads
Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
```python
from pathlib import Path
from openai import OpenAI
client = OpenAI()
client.files.create(
    file=Path("input.jsonl"),
    purpose="fine-tune",
)
```
The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
response), a subclass of `openai.APIStatusError` is raised, containing `status_code` and `response` properties.
All errors inherit from `openai.APIError`.
```python
import openai
from openai import OpenAI
client = OpenAI()
try:
    client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",
    )
except openai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
except openai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
```
Error codes are as follows:
| Status Code | Error Type |
| ----------- | -------------------------- |
| 400 | `BadRequestError` |
| 401 | `AuthenticationError` |
| 403 | `PermissionDeniedError` |
| 404 | `NotFoundError` |
| 422 | `UnprocessableEntityError` |
| 429 | `RateLimitError` |
| >=500 | `InternalServerError` |
| N/A | `APIConnectionError` |
### Retries
Certain errors are automatically retried 2 times by default, with a short exponential backoff.
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
429 Rate Limit, and >=500 Internal errors are all retried by default.
You can use the `max_retries` option to configure or disable retry settings:
```python
from openai import OpenAI
# Configure the default for all requests:
client = OpenAI(
    # default is 2
    max_retries=0,
)

# Or, configure per-request:
client.with_options(max_retries=5).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I get the name of the current day in Node.js?",
        }
    ],
    model="gpt-3.5-turbo",
)
```
### Timeouts
By default requests time out after 10 minutes. You can configure this with a `timeout` option,
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
```python
import httpx
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # 20 seconds (default is 10 minutes)
    timeout=20.0,
)

# More granular control:
client = OpenAI(
    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)

# Override per-request:
client.with_options(timeout=5.0).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I list all files in a directory using Python?",
        }
    ],
    model="gpt-3.5-turbo",
)
```
On timeout, an `APITimeoutError` is thrown.
Note that requests that time out are [retried twice by default](#retries).
## Advanced
### Logging
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`.
```shell
$ export OPENAI_LOG=debug
```
### How to tell whether `None` means `null` or missing
In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:
```py
if response.my_field is None:
    if 'my_field' not in response.model_fields_set:
        print('Got json like {}, without a "my_field" key present at all.')
    else:
        print('Got json like {"my_field": null}.')
```
### Accessing raw response data (e.g. headers)
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
from openai import OpenAI
client = OpenAI()
response = client.chat.completions.with_raw_response.create(
    messages=[{
        "role": "user",
        "content": "Say this is a test",
    }],
    model="gpt-3.5-turbo",
)
print(response.headers.get('X-My-Header'))

completion = response.parse()  # get the object that `chat.completions.create()` would have returned
print(completion)
```
These methods return a [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class, as we're changing it slightly in the next major version.
For the sync client this will mostly be the same, with the exception that `content` and `text` will be methods instead of properties. In the
async client, all methods will be async.
A migration script will be provided, and the migration in general should
be smooth.
#### `.with_streaming_response`
The above interface eagerly reads the full response body when you make the request, which may not always be what you want.
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object.
```python
with client.chat.completions.with_streaming_response.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
) as response:
    print(response.headers.get("X-My-Header"))

    for line in response.iter_lines():
        print(line)
```
The context manager is required so that the response will reliably be closed.
### Making custom/undocumented requests
This library is typed for convenient access to the documented API.
If you need to access undocumented endpoints, params, or response properties, the library can still be used.
#### Undocumented endpoints
To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and the other
HTTP verbs. Options on the client (such as retries) will be respected when making these
requests.
```py
import httpx
response = client.post(
    "/foo",
    cast_to=httpx.Response,
    body={"my_param": True},
)
print(response.headers.get("x-foo"))
```
#### Undocumented request params
If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` request
options.
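For example, a minimal sketch where the header and body keys are made-up names used purely for illustration:
```python
from openai import OpenAI

client = OpenAI()

# `extra_headers` and `extra_body` are merged into the outgoing request;
# "x-my-header" and "my_undocumented_param" are hypothetical names.
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-3.5-turbo",
    extra_headers={"x-my-header": "true"},
    extra_body={"my_undocumented_param": True},
)
```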
#### Undocumented response properties
To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You
can also get all the extra fields on the Pydantic model as a dict with
[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra).
### Configuring the HTTP client
You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including:
- Support for proxies
- Custom transports
- Additional [advanced](https://www.python-httpx.org/advanced/#client-instances) functionality
```python
import httpx
from openai import OpenAI
client = OpenAI(
    # Or use the `OPENAI_BASE_URL` env var
    base_url="http://my.test.server.example.com:8083",
    http_client=httpx.Client(
        proxies="http://my.test.proxy.example.com",
        transport=httpx.HTTPTransport(local_address="0.0.0.0"),
    ),
)
```
### Managing HTTP resources
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
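For example, a small sketch of both options:
```python
from openai import OpenAI

# Close explicitly when you are done with the client...
client = OpenAI()
client.close()

# ...or let a context manager close it on exit.
with OpenAI() as client:
    client.models.list()
```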
## Microsoft Azure OpenAI
To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI`
class instead of the `OpenAI` class.
> [!IMPORTANT]
> The Azure API shape differs from the core API shape which means that the static types for responses / params
> won't always be correct.
```py
from openai import AzureOpenAI
# gets the API Key from environment variable AZURE_OPENAI_API_KEY
client = AzureOpenAI(
    # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
    api_version="2023-07-01-preview",
    # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
    azure_endpoint="https://example-endpoint.openai.azure.com",
)

completion = client.chat.completions.create(
    model="deployment-name",  # e.g. gpt-35-instant
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.model_dump_json(indent=2))
```
In addition to the options provided in the base `OpenAI` client, the following options are provided:
- `azure_endpoint` (or the `AZURE_OPENAI_ENDPOINT` environment variable)
- `azure_deployment`
- `api_version` (or the `OPENAI_API_VERSION` environment variable)
- `azure_ad_token` (or the `AZURE_OPENAI_AD_TOKEN` environment variable)
- `azure_ad_token_provider`
An example of using the client with Azure Active Directory can be found [here](https://github.com/openai/openai-python/blob/main/examples/azure_ad.py).
## Versioning
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
1. Changes that only affect static types, without breaking runtime behavior.
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
3. Changes that we do not expect to impact the vast majority of users in practice.
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions.
## Requirements
Python 3.7 or higher.

View file

@@ -1,638 +0,0 @@
# OpenAI Python API library
[![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/)
The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.7+
application. The library includes type definitions for all request params and response fields,
and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/).
## Documentation
The REST API documentation can be found [on platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md).
## Installation
> [!IMPORTANT]
> The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code.
```sh
# install from PyPI
pip install openai
```
## Usage
The full API of this library can be found in [api.md](api.md).
```python
import os
from openai import OpenAI
client = OpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
)
```
While you can provide an `api_key` keyword argument,
we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
to add `OPENAI_API_KEY="My API Key"` to your `.env` file
so that your API Key is not stored in source control.
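For example, a minimal sketch of that setup, assuming `python-dotenv` is installed:
```python
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # reads OPENAI_API_KEY from a local .env file into os.environ
client = OpenAI()  # the client picks the key up from the environment
```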
### Polling Helpers
When interacting with the API, some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes
helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
If an API method results in an action which could benefit from polling, there will be a corresponding version of the
method ending in `_and_poll`.
For instance, to create a Run and poll until it reaches a terminal state you can run:
```python
run = client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id,
)
```
More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
### Bulk Upload Helpers
When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations.
For convenience, we also provide a bulk upload helper that lets you upload several files at once.
```python
from pathlib import Path

sample_files = [Path("sample-paper.pdf"), ...]

batch = await client.vector_stores.file_batches.upload_and_poll(
    store.id,
    files=sample_files,
)
```
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
```python
with client.beta.threads.runs.stream(
    thread_id=thread.id,
    assistant_id=assistant.id,
    instructions="Please address the user as Jane Doe. The user has a premium account.",
) as stream:
    for event in stream:
        # Print the text from text delta events
        if event.type == "thread.message.delta" and event.data.delta.content:
            print(event.data.delta.content[0].text)
```
More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md)
## Async usage
Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call:
```python
import os
import asyncio
from openai import AsyncOpenAI
client = AsyncOpenAI(
    # This is the default and can be omitted
    api_key=os.environ.get("OPENAI_API_KEY"),
)


async def main() -> None:
    chat_completion = await client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": "Say this is a test",
            }
        ],
        model="gpt-3.5-turbo",
    )


asyncio.run(main())
```
Functionality between the synchronous and asynchronous clients is otherwise identical.
## Streaming responses
We provide support for streaming responses using Server-Sent Events (SSE).
```python
from openai import OpenAI
client = OpenAI()
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```
The async client uses the exact same interface.
```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main():
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say this is a test"}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())
```
## Module-level client
> [!IMPORTANT]
> We highly recommend instantiating client instances instead of relying on the global client.
We also expose a global client instance that is accessible in a similar fashion to versions prior to v1.
```py
import openai
# optional; defaults to `os.environ['OPENAI_API_KEY']`
openai.api_key = '...'
# all client options can be configured just like the `OpenAI` instantiation counterpart
openai.base_url = "https://..."
openai.default_headers = {"x-foo": "true"}
completion = openai.chat.completions.create(
    model="gpt-4",
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.choices[0].message.content)
```
The API is exactly the same as the standard client-instance-based API.
This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code.
We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because:
- It can be difficult to reason about where client options are configured
- It's not possible to change certain client options without potentially causing race conditions
- It's harder to mock for testing purposes
- It's not possible to control cleanup of network connections
## Using types
Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
- Serializing back into JSON, `model.to_json()`
- Converting to a dictionary, `model.to_dict()`
Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
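As a short sketch reusing the chat completion call from above:
```python
from openai import OpenAI

client = OpenAI()
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-3.5-turbo",
)

# Serialize back into JSON, or convert to a plain dict.
print(completion.to_json())
print(completion.to_dict())
```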
## Pagination
List methods in the OpenAI API are paginated.
This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:
```python
from openai import OpenAI

client = OpenAI()

all_jobs = []
# Automatically fetches more pages as needed.
for job in client.fine_tuning.jobs.list(
    limit=20,
):
    # Do something with job here
    all_jobs.append(job)
print(all_jobs)
```
Or, asynchronously:
```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    all_jobs = []
    # Iterate through items across all pages, issuing requests as needed.
    async for job in client.fine_tuning.jobs.list(
        limit=20,
    ):
        all_jobs.append(job)
    print(all_jobs)


asyncio.run(main())
```
Alternatively, you can use the `.has_next_page()`, `.next_page_info()`, or `.get_next_page()` methods for more granular control when working with pages:
```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
if first_page.has_next_page():
    print(f"will fetch next page using these details: {first_page.next_page_info()}")
    next_page = await first_page.get_next_page()
    print(f"number of items we just fetched: {len(next_page.data)}")

# Remove `await` for non-async usage.
```
Or just work directly with the returned data:
```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
print(f"next page cursor: {first_page.after}")  # => "next page cursor: ..."
for job in first_page.data:
    print(job.id)

# Remove `await` for non-async usage.
```
## Nested params
Nested parameters are dictionaries, typed using `TypedDict`, for example:
```python
from openai import OpenAI
client = OpenAI()
completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-3.5-turbo-1106",
    response_format={"type": "json_object"},
)
```
## File uploads
Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
```python
from pathlib import Path
from openai import OpenAI
client = OpenAI()
client.files.create(
    file=Path("input.jsonl"),
    purpose="fine-tune",
)
```
The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
response), a subclass of `openai.APIStatusError` is raised, containing `status_code` and `response` properties.
All errors inherit from `openai.APIError`.
```python
import openai
from openai import OpenAI
client = OpenAI()
try:
    client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",
    )
except openai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
except openai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
```
Error codes are as follows:
| Status Code | Error Type |
| ----------- | -------------------------- |
| 400 | `BadRequestError` |
| 401 | `AuthenticationError` |
| 403 | `PermissionDeniedError` |
| 404 | `NotFoundError` |
| 422 | `UnprocessableEntityError` |
| 429 | `RateLimitError` |
| >=500 | `InternalServerError` |
| N/A | `APIConnectionError` |
### Retries
Certain errors are automatically retried 2 times by default, with a short exponential backoff.
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
429 Rate Limit, and >=500 Internal errors are all retried by default.
You can use the `max_retries` option to configure or disable retry settings:
```python
from openai import OpenAI
# Configure the default for all requests:
client = OpenAI(
    # default is 2
    max_retries=0,
)

# Or, configure per-request:
client.with_options(max_retries=5).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I get the name of the current day in Node.js?",
        }
    ],
    model="gpt-3.5-turbo",
)
```
### Timeouts
By default requests time out after 10 minutes. You can configure this with a `timeout` option,
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
```python
import httpx
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # 20 seconds (default is 10 minutes)
    timeout=20.0,
)

# More granular control:
client = OpenAI(
    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)

# Override per-request:
client.with_options(timeout=5.0).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I list all files in a directory using Python?",
        }
    ],
    model="gpt-3.5-turbo",
)
```
On timeout, an `APITimeoutError` is thrown.
Note that requests that time out are [retried twice by default](#retries).
## Advanced
### Logging
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`.
```shell
$ export OPENAI_LOG=debug
```
### How to tell whether `None` means `null` or missing
In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:
```py
if response.my_field is None:
    if 'my_field' not in response.model_fields_set:
        print('Got json like {}, without a "my_field" key present at all.')
    else:
        print('Got json like {"my_field": null}.')
```
### Accessing raw response data (e.g. headers)
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
from openai import OpenAI
client = OpenAI()
response = client.chat.completions.with_raw_response.create(
    messages=[{
        "role": "user",
        "content": "Say this is a test",
    }],
    model="gpt-3.5-turbo",
)
print(response.headers.get('X-My-Header'))

completion = response.parse()  # get the object that `chat.completions.create()` would have returned
print(completion)
```
These methods return a [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class, as we're changing it slightly in the next major version.
For the sync client this will mostly be the same, with the exception that `content` and `text` will be methods instead of properties. In the
async client, all methods will be async.
A migration script will be provided, and the migration in general should
be smooth.
#### `.with_streaming_response`
The above interface eagerly reads the full response body when you make the request, which may not always be what you want.
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object.
```python
with client.chat.completions.with_streaming_response.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
) as response:
    print(response.headers.get("X-My-Header"))

    for line in response.iter_lines():
        print(line)
```
The context manager is required so that the response will reliably be closed.
### Making custom/undocumented requests
This library is typed for convenient access to the documented API.
If you need to access undocumented endpoints, params, or response properties, the library can still be used.
#### Undocumented endpoints
To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and the other
HTTP verbs. Options on the client (such as retries) will be respected when making these
requests.
```py
import httpx
response = client.post(
    "/foo",
    cast_to=httpx.Response,
    body={"my_param": True},
)
print(response.headers.get("x-foo"))
```
#### Undocumented request params
If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` request
options.
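For example, a minimal sketch where the header and body keys are made-up names used purely for illustration:
```python
from openai import OpenAI

client = OpenAI()

# `extra_headers` and `extra_body` are merged into the outgoing request;
# "x-my-header" and "my_undocumented_param" are hypothetical names.
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-3.5-turbo",
    extra_headers={"x-my-header": "true"},
    extra_body={"my_undocumented_param": True},
)
```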
#### Undocumented response properties
To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You
can also get all the extra fields on the Pydantic model as a dict with
[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra).
### Configuring the HTTP client
You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including:
- Support for proxies
- Custom transports
- Additional [advanced](https://www.python-httpx.org/advanced/#client-instances) functionality
```python
import httpx
from openai import OpenAI, DefaultHttpxClient

client = OpenAI(
    # Or use the `OPENAI_BASE_URL` env var
    base_url="http://my.test.server.example.com:8083",
    http_client=DefaultHttpxClient(
        proxies="http://my.test.proxy.example.com",
        transport=httpx.HTTPTransport(local_address="0.0.0.0"),
    ),
)
```
### Managing HTTP resources
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
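For example, a small sketch of both options:
```python
from openai import OpenAI

# Close explicitly when you are done with the client...
client = OpenAI()
client.close()

# ...or let a context manager close it on exit.
with OpenAI() as client:
    client.models.list()
```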
## Microsoft Azure OpenAI
To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI`
class instead of the `OpenAI` class.
> [!IMPORTANT]
> The Azure API shape differs from the core API shape which means that the static types for responses / params
> won't always be correct.
```py
from openai import AzureOpenAI
# gets the API Key from environment variable AZURE_OPENAI_API_KEY
client = AzureOpenAI(
    # https://learn.microsoft.com/azure/ai-services/openai/reference#rest-api-versioning
    api_version="2023-07-01-preview",
    # https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
    azure_endpoint="https://example-endpoint.openai.azure.com",
)

completion = client.chat.completions.create(
    model="deployment-name",  # e.g. gpt-35-instant
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.to_json())
```
In addition to the options provided in the base `OpenAI` client, the following options are provided:
- `azure_endpoint` (or the `AZURE_OPENAI_ENDPOINT` environment variable)
- `azure_deployment`
- `api_version` (or the `OPENAI_API_VERSION` environment variable)
- `azure_ad_token` (or the `AZURE_OPENAI_AD_TOKEN` environment variable)
- `azure_ad_token_provider`
An example of using the client with Microsoft Entra ID (formerly known as Azure Active Directory) can be found [here](https://github.com/openai/openai-python/blob/main/examples/azure_ad.py).
## Versioning
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
1. Changes that only affect static types, without breaking runtime behavior.
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
3. Changes that we do not expect to impact the vast majority of users in practice.
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions.
## Requirements
Python 3.7 or higher.

View file

@@ -1,15 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
	<maintainer type="person">
		<email>blshkv@pentoo.org</email>
		<name>Anton Bolshakov</name>
	</maintainer>
	<upstream>
		<remote-id type="github">openai/openai-python</remote-id>
		<remote-id type="pypi">openai</remote-id>
	</upstream>
	<use>
		<flag name="datalib">Pull in optional data libraries such as dev-python/numpy and dev-python/pandas (130MB+)</flag>
	</use>
</pkgmetadata>

View file

@@ -1,44 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_USE_PEP517=hatchling
PYTHON_COMPAT=( python3_{10..12} )
inherit distutils-r1 pypi
DESCRIPTION="Python client library for the OpenAI API"
HOMEPAGE="https://github.com/openai/openai-python"
LICENSE="MIT"
SLOT="0"
KEYWORDS="amd64 ~arm64 x86"
IUSE="datalib"
RDEPEND="
	>=dev-python/httpx-0.23.0[${PYTHON_USEDEP}]
	>=dev-python/pydantic-1.9.0[${PYTHON_USEDEP}]
	>=dev-python/typing-extensions-4.7[${PYTHON_USEDEP}]
	>=dev-python/anyio-3.5.0[${PYTHON_USEDEP}]
	>=dev-python/distro-1.7.0[${PYTHON_USEDEP}]
	dev-python/sniffio[${PYTHON_USEDEP}]
	>dev-python/tqdm-4[${PYTHON_USEDEP}]
	datalib? (
		dev-python/numpy[${PYTHON_USEDEP}]
		>=dev-python/pandas-1.2.3[${PYTHON_USEDEP}]
		>=dev-python/pandas-stubs-1.1.0.11[${PYTHON_USEDEP}]
	)"
# wandb? ( dev-python/wandb[$PYTHON_USEDEP}] )"
DEPEND="${RDEPEND}"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"
# wandb? ( datalib )"
RESTRICT="test"
src_prepare() {
	cp "${FILESDIR}/${PV}-README.md" ./README.md || die
	eapply_user
}

View file

@@ -1,44 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_USE_PEP517=hatchling
PYTHON_COMPAT=( python3_{10..12} )
inherit distutils-r1 pypi
DESCRIPTION="Python client library for the OpenAI API"
HOMEPAGE="https://github.com/openai/openai-python"
LICENSE="MIT"
SLOT="0"
KEYWORDS="amd64 ~arm64 x86"
IUSE="datalib"
RDEPEND="
	>=dev-python/httpx-0.23.0[${PYTHON_USEDEP}]
	>=dev-python/pydantic-1.9.0[${PYTHON_USEDEP}]
	>=dev-python/typing-extensions-4.7[${PYTHON_USEDEP}]
	>=dev-python/anyio-3.5.0[${PYTHON_USEDEP}]
	>=dev-python/distro-1.7.0[${PYTHON_USEDEP}]
	dev-python/sniffio[${PYTHON_USEDEP}]
	>dev-python/tqdm-4[${PYTHON_USEDEP}]
	datalib? (
		dev-python/numpy[${PYTHON_USEDEP}]
		>=dev-python/pandas-1.2.3[${PYTHON_USEDEP}]
		>=dev-python/pandas-stubs-1.1.0.11[${PYTHON_USEDEP}]
	)"
# wandb? ( dev-python/wandb[$PYTHON_USEDEP}] )"
DEPEND="${RDEPEND}"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"
# wandb? ( datalib )"
RESTRICT="test"
src_prepare() {
	cp "${FILESDIR}/${PV}-README.md" ./README.md || die
	eapply_user
}

View file

@@ -1 +0,0 @@
DIST peewee_async-0.10.0.tar.gz 14550 BLAKE2B dff8a48d960cfaed4e69d52e53734c6269052d2c64f4e612c138bb3c3209235698a013fa47abe9694e3eaa416c549d42653a4d8c84d44e8233f6b232744c67e9 SHA512 4b940c93c5748c28d2bea76ba24cb3c5f845031753455f818b4b8ef5ebc19f8592825e26232f120669aade7fd7d8a85f55b231539d75e0706337fd6b5d63694e

View file

@@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
	<maintainer type="person">
		<email>unknown@pentoo.ch</email>
		<name>Author Unknown</name>
	</maintainer>
	<upstream>
		<remote-id type="github">05bit/peewee-async</remote-id>
		<remote-id type="pypi">peewee-async</remote-id>
	</upstream>
</pkgmetadata>

View file

@@ -1,29 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_USE_PEP517=poetry
PYTHON_COMPAT=( python3_{10..12} )
inherit distutils-r1 pypi
DESCRIPTION="Asynchronous interface for peewee ORM powered by asyncio"
HOMEPAGE="https://github.com/05bit/peewee-async"
#SRC_URI="https://github.com/05bit/peewee-async/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz"
LICENSE="MIT"
SLOT="0"
KEYWORDS="amd64 x86"
IUSE="postgres"
RDEPEND="
	>=dev-python/peewee-3.15.5[${PYTHON_USEDEP}]
	>=dev-python/importlib-metadata-6.7.0[${PYTHON_USEDEP}]
	postgres? ( dev-python/aiopg[${PYTHON_USEDEP}] )
"
#FIXME:
# mysql dev-python/aiomysql
# mysql = ["aiomysql", "cryptography"]
DEPEND="${RDEPEND}"
REQUIRED_USE="${PYTHON_REQUIRED_USE}"

View file

@@ -1,3 +0,0 @@
DIST pillow-10.1.0.gh.tar.gz 50798274 BLAKE2B a69be8187e7206b50350af399b73b5c43b29c249a4a5a6bb23438dae9967bb84f1d487f52b188811cb3d2e550245f8d5a765668d8d5de1375a84fcd96fc531d9 SHA512 01c97b68d4167d10539a2d29fb82676fb417ee5003f0acd9f602ed13d41b200579497cc0ef0949b2c1549b684f76f2d43895a52abdb1367345d2affd544c5b5a
DIST pillow-10.2.0.gh.tar.gz 46244216 BLAKE2B 27bb076f0adade34295e6dfec5c5a1499471036e4f5358f0b3985c064a0ae962be9e91985851b369a49162ef520a84bb20eb1a5acb074fe94d861f042e4ada92 SHA512 d77ebedfb89d541db2fcba4fc354e097b9917594170bad07c67e22de123c9e0bbc9c55a938fc152bbdcca3ce23de6fa374a6d3d3bdbd44f16dbc2616c1bddf60
DIST pillow-10.3.0.gh.tar.gz 46636534 BLAKE2B 523486a3adc8905dbe0691e7092654e7884c34703f351e228a49b1cb6e9cffdf504b842aa0c83b18afb8f0dbe1265274044003aba540d6427ec3cd46ddda1498 SHA512 11095f435ba30ac364575271de4c94d498b6fc1d67730b8212fae6f187902129018ca950aa878843f4d1b29e25aab1be245ed313fd3bc110ccf9ce3ae266d840

View file

@ -1,50 +0,0 @@
From 0e523d986858e7c0b4acd45ea1c5a3a639e39b4b Mon Sep 17 00:00:00 2001
From: Andrew Murray <radarhere@users.noreply.github.com>
Date: Sat, 2 Dec 2023 10:57:16 +1100
Subject: [PATCH] Fixed closing file pointer with olefile 0.47
---
 src/PIL/FpxImagePlugin.py | 1 +
 src/PIL/MicImagePlugin.py | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/src/PIL/FpxImagePlugin.py b/src/PIL/FpxImagePlugin.py
index a878cbfd2..3027ef45b 100644
--- a/src/PIL/FpxImagePlugin.py
+++ b/src/PIL/FpxImagePlugin.py
@@ -227,6 +227,7 @@ class FpxImageFile(ImageFile.ImageFile):
                 break  # isn't really required
 
         self.stream = stream
+        self._fp = self.fp
         self.fp = None
 
     def load(self):
diff --git a/src/PIL/MicImagePlugin.py b/src/PIL/MicImagePlugin.py
index 801318930..e4154902f 100644
--- a/src/PIL/MicImagePlugin.py
+++ b/src/PIL/MicImagePlugin.py
@@ -66,6 +66,7 @@ class MicImageFile(TiffImagePlugin.TiffImageFile):
         self._n_frames = len(self.images)
         self.is_animated = self._n_frames > 1
 
+        self.__fp = self.fp
         self.seek(0)
 
     def seek(self, frame):
@@ -87,10 +88,12 @@ class MicImageFile(TiffImagePlugin.TiffImageFile):
         return self.frame
 
     def close(self):
+        self.__fp.close()
         self.ole.close()
         super().close()
 
     def __exit__(self, *args):
+        self.__fp.close()
         self.ole.close()
         super().__exit__()
 
--
2.43.0

View file

@@ -1,62 +0,0 @@
From 774d7a570d9f76903de3c3267512b8a7d252c21e Mon Sep 17 00:00:00 2001
From: James Le Cuirot <chewi@gentoo.org>
Date: Fri, 22 Dec 2023 22:38:27 +0000
Subject: [PATCH] Fix cross-compiling by searching the right lib and include
directories
We were previously searching the `{sys.prefix}/lib` and
`{sys.prefix}/include` directories unconditionally. This is problematic
when cross-compiling, as it does not take account of any sysroot where
alternative libraries and headers are located. Adding `-I/usr/include`
causes the build to explode, at least when cross-compiling from 64-bit
to 32-bit.
Python does not officially support cross-compiling, but Gentoo achieves
this by modifying the sysconfig variables like `LIBDIR` and `INCLUDEDIR`
with great results.
Assuming "lib" is bad. 64-bit Linux systems often use lib64, putting
32-bit libraries under lib. You cannot assume that either though, as
pure 64-bit Linux systems may just use lib instead. Things get even
stranger on RISC-V.
The value of `sys.prefix` changes when using a virtualenv. Dependencies
may be installed here, so it does make sense to continue supporting this
case, even if it is incompatible with cross-compiling. Unlike regular
environments, "lib" is generally used for libraries, although a lib64
symlink may also be present.
---
 setup.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index 1bf0bcff558..07163d001fc 100755
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@
 import struct
 import subprocess
 import sys
+import sysconfig
 import warnings
 
 from setuptools import Extension, setup
@@ -504,8 +505,16 @@ def build_extensions(self):
             for d in os.environ[k].split(os.path.pathsep):
                 _add_directory(library_dirs, d)
 
-        _add_directory(library_dirs, os.path.join(sys.prefix, "lib"))
-        _add_directory(include_dirs, os.path.join(sys.prefix, "include"))
+        _add_directory(
+            library_dirs,
+            (sys.prefix == sys.base_prefix and sysconfig.get_config_var("LIBDIR"))
+            or os.path.join(sys.prefix, "lib"),
+        )
+        _add_directory(
+            include_dirs,
+            (sys.prefix == sys.base_prefix and sysconfig.get_config_var("INCLUDEDIR"))
+            or os.path.join(sys.prefix, "include"),
+        )
 
         #
         # add platform directories

View file

@@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
	<maintainer type="project">
		<email>python@gentoo.org</email>
		<name>Python</name>
	</maintainer>
	<use>
		<flag name="imagequant">Build with Image Quantization Library support</flag>
		<flag name="raqm">Build with complex text layout library support</flag>
	</use>
	<upstream>
		<remote-id type="pypi">pillow</remote-id>
		<remote-id type="github">python-pillow/Pillow</remote-id>
	</upstream>
</pkgmetadata>

View file

@ -1,142 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
DISTUTILS_EXT=1
# setuptools wrapper
DISTUTILS_USE_PEP517=standalone
PYTHON_COMPAT=( python3_{10..12} pypy3 )
PYTHON_REQ_USE='tk?,threads(+)'
inherit distutils-r1 toolchain-funcs virtualx
MY_PN=Pillow
MY_P=${MY_PN}-${PV}
DESCRIPTION="Python Imaging Library (fork)"
HOMEPAGE="
	https://python-pillow.org/
	https://github.com/python-pillow/
	https://pypi.org/project/Pillow/
"
SRC_URI="
	https://github.com/python-pillow/Pillow/archive/${PV}.tar.gz
		-> ${P}.gh.tar.gz
"
S="${WORKDIR}/${MY_P}"
LICENSE="HPND"
SLOT="0"
KEYWORDS="~alpha amd64 arm arm64 hppa ~ia64 ~loong ~m68k ppc ppc64 ~riscv ~s390 sparc x86 ~amd64-linux ~x86-linux ~arm64-macos ~x64-macos"
IUSE="examples imagequant +jpeg jpeg2k lcms raqm test tiff tk truetype webp xcb zlib"
REQUIRED_USE="test? ( jpeg jpeg2k lcms tiff truetype )"
RESTRICT="!test? ( test )"
# https://bugs.gentoo.org/895948
DEPEND="
	imagequant? ( media-gfx/libimagequant:= )
	jpeg? ( media-libs/libjpeg-turbo:= )
	jpeg2k? ( media-libs/openjpeg:2= )
	lcms? ( media-libs/lcms:2= )
	raqm? ( media-libs/libraqm:= )
	tiff? ( media-libs/tiff:=[jpeg,zlib] )
	truetype? ( media-libs/freetype:2= )
	webp? ( media-libs/libwebp:= )
	xcb? ( x11-libs/libxcb )
	zlib? ( sys-libs/zlib:= )
"
RDEPEND="
	${DEPEND}
	dev-python/olefile[${PYTHON_USEDEP}]
"
BDEPEND="
	dev-python/setuptools[${PYTHON_USEDEP}]
	dev-python/wheel[${PYTHON_USEDEP}]
	virtual/pkgconfig
	test? (
		${RDEPEND}
		dev-python/defusedxml[${PYTHON_USEDEP}]
		dev-python/packaging[${PYTHON_USEDEP}]
		dev-python/pytest[${PYTHON_USEDEP}]
		dev-python/pytest-timeout[${PYTHON_USEDEP}]
		|| (
			media-gfx/imagemagick[png]
			media-gfx/graphicsmagick[png]
		)
	)
"

EPYTEST_DESELECT=(
	# TODO; incompatible Qt version?
	Tests/test_qt_image_qapplication.py::test_sanity
)

PATCHES=(
	# https://github.com/python-pillow/pillow/pull/7594
	"${FILESDIR}/${P}-olefile-0.47.patch"
)
usepil() {
usex "${1}" enable disable
}
python_configure_all() {
# It's important that these flags are also passed during the install phase
# as well. Make sure of that if you change the lines below. See bug 661308.
cat >> setup.cfg <<-EOF || die
[build_ext]
disable_platform_guessing = True
$(usepil truetype)_freetype = True
$(usepil jpeg)_jpeg = True
$(usepil jpeg2k)_jpeg2000 = True
$(usepil lcms)_lcms = True
$(usepil raqm)_raqm = True
$(usepil tiff)_tiff = True
$(usepil imagequant)_imagequant = True
$(usepil webp)_webp = True
$(usepil webp)_webpmux = True
$(usepil xcb)_xcb = True
$(usepil zlib)_zlib = True
EOF
# setup.py won't let us add the right toolchain paths but it does
# accept additional ones from INCLUDE and LIB so set these. You
# wouldn't normally need these at all as the toolchain should look
# here anyway but it doesn't for this setup.py.
export \
INCLUDE="${ESYSROOT}"/usr/include \
LIB="${ESYSROOT}"/usr/$(get_libdir)
# We have patched in this env var.
tc-export PKG_CONFIG
}
src_test() {
virtx distutils-r1_src_test
}
python_test() {
local EPYTEST_DESELECT=(
# TODO (is clipboard unreliable in Xvfb?)
Tests/test_imagegrab.py::TestImageGrab::test_grabclipboard
)
"${EPYTHON}" selftest.py --installed || die "selftest failed with ${EPYTHON}"
# no:relaxed: pytest-relaxed plugin make our tests fail. deactivate if installed
epytest -p no:relaxed || die "Tests failed with ${EPYTHON}"
}
python_install() {
python_doheader src/libImaging/*.h
distutils-r1_python_install
}
python_install_all() {
if use examples ; then
docinto example
dodoc docs/example/*
docompress -x /usr/share/doc/${PF}/example
fi
distutils-r1_python_install_all
}
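To illustrate the heredoc technique above: each $(usepil ...) expands to "enable" or "disable" via usex, so with, say, USE="jpeg truetype zlib" and the remaining flags unset, the fragment appended to setup.cfg would read roughly as follows (a sketch derived from the function, not from a real build log):

[build_ext]
disable_platform_guessing = True
enable_freetype = True
enable_jpeg = True
disable_jpeg2000 = True
disable_lcms = True
disable_raqm = True
disable_tiff = True
disable_imagequant = True
disable_webp = True
disable_webpmux = True
disable_xcb = True
enable_zlib = True

Disabling platform guessing makes setup.py build exactly the feature set selected by USE flags instead of auto-detecting whatever libraries happen to be installed.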

View file

@ -1,134 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

EAPI=8

DISTUTILS_EXT=1
# setuptools wrapper
DISTUTILS_USE_PEP517=standalone
PYTHON_COMPAT=( python3_{10..12} pypy3 )
PYTHON_REQ_USE='tk?,threads(+)'

inherit distutils-r1 toolchain-funcs virtualx

MY_PN=Pillow
MY_P=${MY_PN}-${PV}
DESCRIPTION="Python Imaging Library (fork)"
HOMEPAGE="
https://python-pillow.org/
https://github.com/python-pillow/Pillow/
https://pypi.org/project/pillow/
"
SRC_URI="
https://github.com/python-pillow/Pillow/archive/${PV}.tar.gz
-> ${P}.gh.tar.gz
"
S="${WORKDIR}/${MY_P}"

LICENSE="HPND"
SLOT="0"
KEYWORDS="~alpha amd64 arm arm64 ~hppa ~ia64 ~loong ~m68k ~ppc ppc64 ~riscv ~s390 ~sparc x86 ~amd64-linux ~x86-linux ~arm64-macos ~x64-macos"
IUSE="examples imagequant +jpeg jpeg2k lcms raqm test tiff tk truetype webp xcb zlib"
REQUIRED_USE="test? ( jpeg jpeg2k lcms tiff truetype )"
RESTRICT="!test? ( test )"

# https://bugs.gentoo.org/895948
DEPEND="
imagequant? ( media-gfx/libimagequant:= )
jpeg? ( media-libs/libjpeg-turbo:= )
jpeg2k? ( media-libs/openjpeg:2= )
lcms? ( media-libs/lcms:2= )
raqm? ( media-libs/libraqm:= )
tiff? ( media-libs/tiff:=[jpeg,zlib] )
truetype? ( media-libs/freetype:2= )
webp? ( media-libs/libwebp:= )
xcb? ( x11-libs/libxcb )
zlib? ( sys-libs/zlib:= )
"
RDEPEND="
${DEPEND}
dev-python/olefile[${PYTHON_USEDEP}]
"
BDEPEND="
dev-python/setuptools[${PYTHON_USEDEP}]
dev-python/wheel[${PYTHON_USEDEP}]
virtual/pkgconfig
test? (
${RDEPEND}
dev-python/defusedxml[${PYTHON_USEDEP}]
dev-python/packaging[${PYTHON_USEDEP}]
dev-python/pytest[${PYTHON_USEDEP}]
dev-python/pytest-timeout[${PYTHON_USEDEP}]
|| (
media-gfx/imagemagick[png]
media-gfx/graphicsmagick[png]
)
)
"

EPYTEST_DESELECT=(
# TODO: incompatible Qt version?
Tests/test_qt_image_qapplication.py::test_sanity
)

PATCHES=(
# https://github.com/python-pillow/pillow/pull/7634
"${FILESDIR}/${P}-cross.patch"
)

usepil() {
usex "${1}" enable disable
}

python_configure_all() {
# These flags must also be passed during the install phase. Make sure
# of that if you change the lines below. See bug 661308.
cat >> setup.cfg <<-EOF || die
[build_ext]
disable_platform_guessing = True
$(usepil truetype)_freetype = True
$(usepil jpeg)_jpeg = True
$(usepil jpeg2k)_jpeg2000 = True
$(usepil lcms)_lcms = True
$(usepil raqm)_raqm = True
$(usepil tiff)_tiff = True
$(usepil imagequant)_imagequant = True
$(usepil webp)_webp = True
$(usepil webp)_webpmux = True
$(usepil xcb)_xcb = True
$(usepil zlib)_zlib = True
EOF

# We have patched in this env var.
tc-export PKG_CONFIG
}

src_test() {
virtx distutils-r1_src_test
}

python_test() {
local EPYTEST_DESELECT=(
# TODO (is clipboard unreliable in Xvfb?)
Tests/test_imagegrab.py::TestImageGrab::test_grabclipboard
)

"${EPYTHON}" selftest.py --installed || die "selftest failed with ${EPYTHON}"

# no:relaxed: the pytest-relaxed plugin makes our tests fail; deactivate it if installed
epytest -p no:relaxed || die "Tests failed with ${EPYTHON}"
}

python_install() {
python_doheader src/libImaging/*.h
distutils-r1_python_install
}

python_install_all() {
if use examples ; then
docinto example
dodoc docs/example/*
docompress -x /usr/share/doc/${PF}/example
fi
distutils-r1_python_install_all
}
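As a usage note for the test machinery above: RESTRICT="!test? ( test )" skips the test phase unless USE=test is set, and REQUIRED_USE then demands the listed flags, so re-running the tests locally would look something like this (a sketch; the dev-python/pillow atom is assumed):

# Rebuild once with the test phase enabled; REQUIRED_USE forces these flags
FEATURES=test USE="jpeg jpeg2k lcms tiff truetype" emerge --oneshot dev-python/pillow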

View file

@ -1,133 +0,0 @@
# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

EAPI=8

DISTUTILS_EXT=1
# setuptools wrapper
DISTUTILS_USE_PEP517=standalone
PYTHON_COMPAT=( python3_{10..12} pypy3 )
PYTHON_REQ_USE='tk?,threads(+)'

inherit distutils-r1 toolchain-funcs virtualx

MY_PN=Pillow
MY_P=${MY_PN}-${PV}
DESCRIPTION="Python Imaging Library (fork)"
HOMEPAGE="
https://python-pillow.org/
https://github.com/python-pillow/Pillow/
https://pypi.org/project/pillow/
"
SRC_URI="
https://github.com/python-pillow/Pillow/archive/${PV}.tar.gz
-> ${P}.gh.tar.gz
"
S="${WORKDIR}/${MY_P}"

LICENSE="HPND"
SLOT="0"
KEYWORDS="~alpha amd64 arm arm64 ~hppa ~ia64 ~loong ~m68k ~ppc ppc64 ~riscv ~s390 ~sparc x86 ~amd64-linux ~x86-linux ~arm64-macos ~x64-macos"
IUSE="examples imagequant +jpeg jpeg2k lcms raqm test tiff tk truetype webp xcb zlib"
REQUIRED_USE="test? ( jpeg jpeg2k lcms tiff truetype )"
RESTRICT="!test? ( test )"

# https://bugs.gentoo.org/895948
DEPEND="
imagequant? ( media-gfx/libimagequant:= )
jpeg? ( media-libs/libjpeg-turbo:= )
jpeg2k? ( media-libs/openjpeg:2= )
lcms? ( media-libs/lcms:2= )
raqm? ( media-libs/libraqm:= )
tiff? ( media-libs/tiff:=[jpeg,zlib] )
truetype? ( media-libs/freetype:2= )
webp? ( media-libs/libwebp:= )
xcb? ( x11-libs/libxcb )
zlib? ( sys-libs/zlib:= )
"
RDEPEND="
${DEPEND}
dev-python/olefile[${PYTHON_USEDEP}]
"
BDEPEND="
dev-python/setuptools[${PYTHON_USEDEP}]
dev-python/wheel[${PYTHON_USEDEP}]
virtual/pkgconfig
test? (
dev-python/defusedxml[${PYTHON_USEDEP}]
dev-python/packaging[${PYTHON_USEDEP}]
dev-python/pytest-timeout[${PYTHON_USEDEP}]
|| (
media-gfx/imagemagick[png]
media-gfx/graphicsmagick[png]
)
)
"

EPYTEST_XDIST=1
distutils_enable_tests pytest

PATCHES=(
# https://github.com/python-pillow/pillow/pull/7634
"${FILESDIR}/${PN}-10.2.0-cross.patch"
)

usepil() {
usex "${1}" enable disable
}

python_configure_all() {
# These flags must also be passed during the install phase. Make sure
# of that if you change the lines below. See bug 661308.
cat >> setup.cfg <<-EOF || die
[build_ext]
disable_platform_guessing = True
$(usepil truetype)_freetype = True
$(usepil jpeg)_jpeg = True
$(usepil jpeg2k)_jpeg2000 = True
$(usepil lcms)_lcms = True
$(usepil raqm)_raqm = True
$(usepil tiff)_tiff = True
$(usepil imagequant)_imagequant = True
$(usepil webp)_webp = True
$(usepil webp)_webpmux = True
$(usepil xcb)_xcb = True
$(usepil zlib)_zlib = True
EOF

# We have patched in this env var.
tc-export PKG_CONFIG
}

src_test() {
virtx distutils-r1_src_test
}

python_test() {
local EPYTEST_DESELECT=(
# TODO (is clipboard unreliable in Xvfb?)
Tests/test_imagegrab.py::TestImageGrab::test_grabclipboard
# requires xz-utils[extra-filters]?
Tests/test_file_libtiff.py::TestFileLibTiff::test_lzma
)

"${EPYTHON}" selftest.py --installed || die "selftest failed with ${EPYTHON}"

local -x PYTEST_DISABLE_PLUGIN_AUTOLOAD=1
# leak tests are fragile and broken under xdist
epytest -k "not leak" -p timeout || die "Tests failed with ${EPYTHON}"
}

python_install() {
python_doheader src/libImaging/*.h
distutils-r1_python_install
}

python_install_all() {
if use examples ; then
docinto example
dodoc docs/example/*
docompress -x /usr/share/doc/${PF}/example
fi
distutils-r1_python_install_all
}
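For reference, python_test above corresponds roughly to the following manual session from the unpacked source tree (a sketch: plugin autoloading is disabled so only the explicitly loaded timeout plugin runs, and the fragile leak tests are filtered out; the exact arguments epytest adds may differ):

# Sanity-check against the installed copy first, as the ebuild does
python selftest.py --installed
# Then run the suite without auto-loaded plugins, skipping leak tests
PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 pytest -p timeout -k "not leak" Tests/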