nvidia-apex: rename use flag

This commit is contained in:
Anton Bolshakov 2023-03-21 22:47:31 +08:00
parent 69f30fe108
commit 9e81682dcb
No known key found for this signature in database
GPG key ID: 32BDCED870788F04
3 changed files with 20 additions and 8 deletions

View file

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
<maintainer type="person">
<email>blshkv@pentoo.ch</email>
<name>Anton Bolshakov</name>
</maintainer>
<use>
<flag name="cuda">Add support for CUDA processing</flag>
</use>
<upstream>
<remote-id type="github">NVIDIA/apex</remote-id>
</upstream>
</pkgmetadata>

View file

@@ -21,10 +21,9 @@ SRC_URI="https://github.com/NVIDIA/apex/archive/${HASH_COMMIT}.tar.gz -> ${P}-gh
LICENSE=""
SLOT="0"
KEYWORDS="~amd64 ~arm64 ~x86"
KEYWORDS="~amd64"
#FIXME: can't use global "cuda"
IUSE="cuda_ext"
IUSE="cuda"
RDEPEND=">=dev-python/cxxfilt-0.2.0[${PYTHON_USEDEP}]
>=dev-python/tqdm-4.28.1[${PYTHON_USEDEP}]
@@ -42,7 +41,7 @@ S="${WORKDIR}/apex-${HASH_COMMIT}"
#If you wish to cross-compile for a single specific architecture,
#export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.
python_configure_all() {
if use cuda_ext; then
if use cuda; then
# export MAX_JOBS=1
#export TORCH_CUDA_ARCH_LIST="compute capability"
export TORCH_CUDA_ARCH_LIST="7.5"

View file

@@ -21,10 +21,9 @@ SRC_URI="https://github.com/NVIDIA/apex/archive/${HASH_COMMIT}.tar.gz -> ${P}-gh
LICENSE=""
SLOT="0"
KEYWORDS="~amd64 ~arm64 ~x86"
KEYWORDS="~amd64"
#FIXME: can't use global "cuda"
IUSE="cuda_ext"
IUSE="cuda"
RDEPEND=">=dev-python/cxxfilt-0.2.0[${PYTHON_USEDEP}]
>=dev-python/tqdm-4.28.1[${PYTHON_USEDEP}]
@@ -42,7 +41,7 @@ S="${WORKDIR}/apex-${HASH_COMMIT}"
#If you wish to cross-compile for a single specific architecture,
#export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.
python_configure_all() {
if use cuda_ext; then
if use cuda; then
# export MAX_JOBS=1
#export TORCH_CUDA_ARCH_LIST="compute capability"
export TORCH_CUDA_ARCH_LIST="7.5"