Official Python SDK for the ModelsLab API: generate AI content including images, videos, audio, 3D models, and more.
## Installation

```bash
pip install modelslab_py
```

For async support:

```bash
pip install 'modelslab_py[async]'
```

## Quick Start

```python
from modelslab_py.core.client import Client

client = Client(api_key="your_api_key")
```

## Features

- Image Generation and Editing
- Video Generation
- Audio Processing
- 3D Model Generation
- Interior Design
- Deepfake Operations
- Community Models Integration
## Usage Examples

### Image Editing

Remove the background from an image:

```python
from modelslab_py.core.client import Client
from modelslab_py.core.apis.image_editing import Image_editing
from modelslab_py.schemas.image_editing import BackgroundRemoverSchema

client = Client(api_key="your_api_key")
api = Image_editing(client=client, enterprise=False)

schema = BackgroundRemoverSchema(
    image="https://example.com/image.jpg",
    base64=False
)

response = api.background_remover(schema=schema)
print(response)
```

### Video Generation

Generate a video from a text prompt:

```python
from modelslab_py.core.apis.video import Video
from modelslab_py.schemas.video import Text2Video

client = Client(api_key="your_api_key")
api = Video(client=client, enterprise=False)

schema = Text2Video(
    model_id="zeroscope",
    prompt="a cat walking in a garden",
    num_frames=30
)

response = api.text_to_video(schema=schema)
print(response)
```

### Interior Design

Restyle a room from a reference photo:

```python
from modelslab_py.core.apis.interior import Interior
from modelslab_py.schemas.interior import InteriorSchema

client = Client(api_key="your_api_key")
api = Interior(client=client, enterprise=False)

schema = InteriorSchema(
    prompt="modern minimalist bedroom",
    init_image="https://example.com/room.jpg"
)

response = api.interior(schema=schema)
print(response)
```

### Text-to-Speech

Convert text to speech:

```python
from modelslab_py.core.apis.audio import Audio
from modelslab_py.schemas.audio import Text2Speech

client = Client(api_key="your_api_key")
api = Audio(client=client, enterprise=False)

schema = Text2Speech(
    prompt="Hello, welcome to ModelsLab",
    voice_id="madison",
    language="english"
)

response = api.text_to_speech(schema=schema)
print(response)
```

### 3D Model Generation

Generate a 3D model from a text prompt:

```python
from modelslab_py.core.apis.three_d import Three_D
from modelslab_py.schemas.threed import Text23D

client = Client(api_key="your_api_key")
api = Three_D(client=client, enterprise=False)

schema = Text23D(
    prompt="a wooden chair",
    model_id="meshy-4",
    output_format="obj"
)

response = api.text_to_3d(schema=schema)
print(response)
```

### Community Models

Generate an image with ZImageTurbo:

```python
from modelslab_py.core.apis.community import Community
from modelslab_py.schemas.community import ZImageTurbo

client = Client(api_key="your_api_key")
api = Community(client=client)

schema = ZImageTurbo(
    prompt="a beautiful sunset over mountains",
    width=1024,
    height=1024,
    samples=1
)

response = api.z_image_turbo(schema=schema)
print(response)
```

Generate an image with Flux2Dev:

```python
from modelslab_py.core.apis.community import Community
from modelslab_py.schemas.community import Flux2Dev

client = Client(api_key="your_api_key")
api = Community(client=client)

schema = Flux2Dev(
    prompt="a futuristic city at night",
    width=1024,
    height=1024,
    samples=1
)

response = api.flux_2_dev(schema=schema)
print(response)
```

Run any community model by `model_id` with the generic Text2Image schema:

```python
from modelslab_py.core.apis.community import Community
from modelslab_py.schemas.community import Text2Image

client = Client(api_key="your_api_key")
api = Community(client=client, enterprise=False)

schema = Text2Image(
    prompt="a beautiful landscape",
    model_id="midjourney",
    width=512,
    height=512
)

response = api.text_to_image(schema=schema)
print(response)
```

## Async Support

All API methods have async equivalents. Use them for concurrent requests and better performance.
```python
import asyncio

from modelslab_py.core.client import Client
from modelslab_py.core.apis.video import Video
from modelslab_py.schemas.video import Text2Video

schema1 = Text2Video(
    model_id="wan2.2",
    prompt="a cat walking",
    num_frames=25,
    fps=16
)
schema2 = Text2Video(
    model_id="wan2.2",
    prompt="a dog running",
    num_frames=25,
    fps=16
)

async def main():
    async with Client(api_key="your_api_key") as client:
        api = Video(client=client, enterprise=False)
        # Run both requests concurrently
        results = await asyncio.gather(
            api.async_text_to_video(schema=schema1),
            api.async_text_to_video(schema=schema2),
        )
        print(results)

asyncio.run(main())
```

For any synchronous method, prefix the name with `async_`:

- `text_to_video()` → `async_text_to_video()`
- `text_to_image()` → `async_text_to_image()`
- `background_remover()` → `async_background_remover()`
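For instance, the background-removal call from the image-editing example can be awaited directly. A minimal sketch, reusing the client, API class, and schema shown above:

```python
import asyncio

from modelslab_py.core.client import Client
from modelslab_py.core.apis.image_editing import Image_editing
from modelslab_py.schemas.image_editing import BackgroundRemoverSchema

async def main():
    # Same async context-manager usage as the video example above
    async with Client(api_key="your_api_key") as client:
        api = Image_editing(client=client, enterprise=False)
        schema = BackgroundRemoverSchema(
            image="https://example.com/image.jpg",
            base64=False
        )
        # async_ variant of background_remover(), as listed above
        response = await api.async_background_remover(schema=schema)
        print(response)

asyncio.run(main())
```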
## Providers

The SDK includes provider-specific implementations for easy access to various AI models:
### MiniMax (Hailuo)

Generate video with the Hailuo 2.3 text-to-video model:

```python
from modelslab_py.core.client import Client
from modelslab_py.providers import MinimaxProvider
from modelslab_py.providers.minimax.schemas import Hailuo23T2VSchema

client = Client(api_key="your_api_key")
minimax = MinimaxProvider(client=client)

schema = Hailuo23T2VSchema(
    prompt="A sunset aerial shot of a lone rider galloping across a snow-covered plain"
)

response = minimax.hailuo_23_t2v(schema=schema)
print(response)
```

### BFL (Flux Pro)

Generate an image with Flux Pro 1.1:

```python
from modelslab_py.providers import BFLProvider
from modelslab_py.providers.bfl.schemas import FluxPro11Schema

client = Client(api_key="your_api_key")
bfl = BFLProvider(client=client)

schema = FluxPro11Schema(
    prompt="A futuristic cityscape at sunset with flying cars",
    width=1024,
    height=768
)

response = bfl.flux_pro_11(schema=schema)
print(response)
```

### Kling AI

Generate video with Kling v2.5 Turbo:

```python
from modelslab_py.providers import KlingAIProvider
from modelslab_py.providers.klingai.schemas import KlingV25TurboT2VSchema

client = Client(api_key="your_api_key")
kling = KlingAIProvider(client=client)

schema = KlingV25TurboT2VSchema(
    prompt="Cinematic drone shot of a luxury cruise ship sailing",
    duration="5",
    aspect_ratio="16:9"
)

response = kling.kling_v25_turbo_t2v(schema=schema)
print(response)
```

### Sync (Lip Sync)

Lip-sync a video to an audio track:

```python
from modelslab_py.providers import SyncProvider
from modelslab_py.providers.sync.schemas import Lipsync2Schema

client = Client(api_key="your_api_key")
sync = SyncProvider(client=client)

schema = Lipsync2Schema(
    init_video="https://example.com/video.mp4",
    init_audio="https://example.com/audio.mp3"
)

response = sync.lipsync_2(schema=schema)
print(response)
```

### Available Providers

- AlibabaProvider: Wan2.5 video generation models
- BFLProvider: Flux Pro image generation models
- BytePlusProvider: SeeDream, SeeDance, and Omni Human models
- ElevenLabsProvider: Text-to-speech and audio generation (see the sketch after this list)
- GoogleProvider: Imagen models for image generation
- InworldProvider: Text-to-speech with voice cloning
- KlingAIProvider: Kling video generation models
- MinimaxProvider: Hailuo video generation models
- OpenAIProvider: Sora video generation
- RunwayProvider: Gen-4 image and video models
- SonautoProvider: Music and song generation
- SyncProvider: Video lip-sync models
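Every provider follows the same pattern as the four examples above: import the provider class from `modelslab_py.providers`, import its schemas from `modelslab_py.providers.<name>.schemas`, then call the model-specific method. As an unverified sketch of that pattern for ElevenLabsProvider, the module path, schema name, and method name below are assumptions extrapolated from the other providers, not confirmed API; check the package for the real names:

```python
from modelslab_py.core.client import Client
from modelslab_py.providers import ElevenLabsProvider

# ASSUMPTION: module path and schema name follow the pattern of the
# minimax/bfl/klingai/sync providers shown above.
from modelslab_py.providers.elevenlabs.schemas import TextToSpeechSchema

client = Client(api_key="your_api_key")
eleven = ElevenLabsProvider(client=client)

schema = TextToSpeechSchema(  # hypothetical schema name
    prompt="Welcome to ModelsLab"
)

response = eleven.text_to_speech(schema=schema)  # hypothetical method name
print(response)
```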
## API Categories

- Image Editing: Background removal, super resolution, inpainting, outpainting
- Video: Text-to-video, image-to-video, watermark removal
- Audio: Text-to-speech, voice conversion, music generation
- Interior: Room design, floor planning, object placement
- 3D: Text-to-3D, image-to-3D model generation
- Deepfake: Face swapping, video manipulation
- Community: Access to community-trained models
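Each category maps onto the same import layout used throughout this README: the API class lives under `modelslab_py.core.apis` and its request schemas under `modelslab_py.schemas`. As a summary, every import below already appears in the examples above (the deepfake module is omitted because its names are not shown here):

```python
# API classes (modelslab_py.core.apis.<module>)
from modelslab_py.core.apis.image_editing import Image_editing
from modelslab_py.core.apis.video import Video
from modelslab_py.core.apis.audio import Audio
from modelslab_py.core.apis.interior import Interior
from modelslab_py.core.apis.three_d import Three_D
from modelslab_py.core.apis.community import Community

# Request schemas (modelslab_py.schemas.<module>)
from modelslab_py.schemas.image_editing import BackgroundRemoverSchema
from modelslab_py.schemas.video import Text2Video
from modelslab_py.schemas.audio import Text2Speech
from modelslab_py.schemas.interior import InteriorSchema
from modelslab_py.schemas.threed import Text23D
from modelslab_py.schemas.community import Text2Image
```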
## Documentation

For detailed documentation, visit docs.modelslab.com.

## Community

- Discord: Join our community
- Twitter: @ModelsLabAI
- GitHub: ModelsLab

## License

See the LICENSE file for details.
