Best practices to help you build reliable, production-ready applications with MyoSapiens.
✅ Do:
❌ Don't:
# ✅ Good: read the secret from the environment at runtime
import os
client = Client(api_key=os.getenv("MYOSDK_API_KEY"))
# ❌ Bad: a key hard-coded in source ends up in version control and logs
client = Client(api_key="v2m_live_abc123...") # Never do this!
The SDK is designed for server-side use. Recommended architecture:
Frontend → Your Backend → MyoSapiens SDK → API
This keeps your API key on the server — the frontend never sees it — and lets your backend control access, validation, and rate limiting.
Always implement proper error handling for production applications:
from myosdk import Client, APIError, NotFoundError, ValidationError

try:
    asset = client.assets.get(asset_id)
except NotFoundError:
    # The asset id does not exist (or was already deleted).
    print("Asset not found")
    # Handle missing asset
except ValidationError as e:
    print(f"Invalid request: {e.message}")
    # Handle validation errors
except APIError as e:
    # Catch-all for other API failures. Listed last: except clauses are
    # tested in order, so the specific subclasses above match first.
    print(f"API error: {e.message}")
    # Log and handle error
See the Error Handling guide for complete patterns.
Always verify assets are ready before using them in jobs:
# Check asset status: only "completed" assets can be used in jobs.
asset = client.assets.get(asset_id)
if asset["status"] != "completed":
    raise ValueError("Asset upload not completed")

# Verify purpose matches the job type before submitting.
if asset["purpose"] != "retarget":
    raise ValueError("Asset has wrong purpose for retarget job")
Periodically clean up unused assets to manage storage:
# List assets with no references (reference_count=0) and delete them
# best-effort to reclaim storage.
unused = client.assets.list(reference_count=0)
for asset in unused["assets"]:
    try:
        client.assets.delete(asset["asset_id"])
    except ValidationError:
        # Asset may have been referenced since listing; skip it.
        pass
Always set timeouts for long-running operations:
# Wait with timeout (5 minutes) so a stuck job cannot block the caller forever.
try:
    result = client.jobs.wait(job_id, timeout=300)
except TimeoutError:
    print("Job did not complete within timeout")
    # Handle timeout appropriately
Don't assume jobs succeed - always check status:
# wait() returning does not imply success - inspect the terminal status.
result = client.jobs.wait(job_id)
if result["status"] == "FAILED":
    # .get() with defaults: the error payload may be absent or partial.
    error = result.get("error", {})
    print(f"Job failed: {error.get('message')}")
    # Handle failure
elif result["status"] == "SUCCEEDED":
    # Process output: download the produced asset to a local file.
    output_asset_id = result["output"]["retarget_output_asset_id"]
    client.assets.download(output_asset_id, "output.npz")
Implement retry logic for transient failures:
import time
from myosdk import RateLimitError, ServerError
def retry_with_backoff(func, max_retries=3):
    """Call ``func()``, retrying transient API failures with backoff.

    Only ``RateLimitError`` and ``ServerError`` are retried; any other
    exception propagates immediately. On the final attempt the caught
    exception is re-raised to the caller.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts (default 3).

    Returns:
        Whatever ``func()`` returns on the first successful attempt.
    """
    for attempt in range(max_retries):
        try:
            return func()
        except (RateLimitError, ServerError) as e:
            if attempt == max_retries - 1:
                raise  # out of retries: surface the last error
            # Prefer the server-provided retry_after hint when present and
            # truthy; otherwise fall back to exponential backoff (1s, 2s, 4s, ...).
            delay = e.retry_after if hasattr(e, 'retry_after') and e.retry_after else 2 ** attempt
            time.sleep(delay)
Let the SDK auto-detect file purposes, or specify explicitly:
# Auto-detection (recommended): purpose is inferred from the file type
c3d_asset = client.assets.upload_file("motion.c3d") # Detects "retarget"
# Explicit specification (if needed), e.g. to override the inferred purpose
c3d_asset = client.assets.upload_file("motion.c3d", purpose="retarget")
Ensure your files are within your plan's size limits. The SDK will automatically handle the upload process:
# Upload files - the SDK handles everything automatically
# (keep files within your plan's size limits; see the guide text above)
asset = client.assets.upload_file("motion.c3d")
Always close clients when done, especially in long-running applications:
# Using context manager (recommended): the client is closed automatically,
# even if an exception is raised inside the block.
with Client(api_key=api_key) as client:
    # Use client
    assets = client.assets.list()

# Or explicitly close, guarded by finally so the connection is always released.
client = Client(api_key=api_key)
try:
    # Use client
    pass
finally:
    client.close()
When listing resources, use pagination for large datasets:
# Process all assets in batches using offset/limit pagination.
offset = 0
limit = 50
while True:
    result = client.assets.list(limit=limit, offset=offset)
    assets = result["assets"]
    if not assets:
        # Empty page: nothing left to fetch.
        break
    # Process batch
    for asset in assets:
        process_asset(asset)
    offset += limit
    # Stop early once the reported total has been consumed, saving one
    # extra (empty) request.
    if offset >= result["total"]:
        break
When possible, batch operations to reduce API calls:
# List all jobs once, then filter in code
all_jobs = client.jobs.list(limit=100)
# One API call instead of one request per job
succeeded = [j for j in all_jobs["jobs"] if j["status"] == "SUCCEEDED"]
Log key operations for debugging and monitoring:
import logging

# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)

try:
    job = client.jobs.start_retarget(c3d_asset_id, markerset_asset_id)
    logger.info(f"Started retarget job: {job['job_id']}")
except APIError as e:
    # exc_info=True attaches the traceback to the log record before re-raising.
    logger.error(f"Failed to start job: {e.message}", exc_info=True)
    raise
For long-running jobs, log progress:
job = client.jobs.start_retarget(c3d_asset_id, markerset_asset_id)
logger.info(f"Job {job['job_id']} queued")

# Poll with logging until the job reaches a terminal state.
while True:
    status = client.jobs.get(job["job_id"])
    logger.info(f"Job {job['job_id']} status: {status['status']}")
    if status["status"] in ("SUCCEEDED", "FAILED", "CANCELED"):
        break
    # Sleep between polls to avoid hammering the API.
    time.sleep(5)