Skip to content

Python SDK

The official Python SDK is published as onceonly-sdk on PyPI.

pip install onceonly-sdk

import os
from onceonly import OnceOnly

client = OnceOnly(api_key=os.environ["ONCEONLY_API_KEY"])

If you self-host, pass a custom base_url. The SDK accepts either the root URL or a URL that already includes /v1:

client = OnceOnly(
    api_key=os.environ["ONCEONLY_API_KEY"],
    base_url="http://localhost:8080",
)

You can also use context managers:

from onceonly import OnceOnly

with OnceOnly(api_key="once_live_xxx") as client:
    stats = client.usage_all()

from onceonly import OnceOnly

async with OnceOnly(api_key="once_live_xxx") as client:
    stats = await client.usage_all_async()

lock = client.check_lock(
    key="payment:invoice:INV-123",
    ttl=3600,
    meta={"invoice_id": "INV-123", "amount_usd": 99.99},
)
if lock.duplicate:
    # Already executed within TTL window.
    # Return the cached result from your own storage.
    raise RuntimeError(f"Duplicate (first_seen_at={lock.first_seen_at})")
# Safe to run the real side-effect here.
charge_customer()

Async variant:

lock = await client.check_lock_async(
    key="payment:invoice:INV-123",
    ttl=3600,
    meta={"invoice_id": "INV-123"},
)

Use ai.run() when you want OnceOnly to start or attach to keyed work.

run = client.ai.run(
    key="support_chat:abc123",
    ttl=1800,
    metadata={
        "run_id": "run_support_001",
        "agent_id": "support_bot",
        "actions": [
            {
                "tool": "send_email",
                "args": {"to": "user@example.com", "subject": "Resolved"},
                "spend_usd": 0.001,
            }
        ],
    },
)
print(run.status, run.lease_id, run.version)

final = client.ai.run_and_wait(
    key="support_chat:abc123",
    ttl=1800,
    metadata={
        "run_id": "run_support_001",
        "agent_id": "support_bot",
        "actions": [
            {
                "tool": "send_email",
                "args": {"to": "user@example.com", "subject": "Resolved"},
                "spend_usd": 0.001,
            }
        ],
    },
    timeout=120,
)
print(final.status, final.error_code, final.result)

Use run_tool() for the synchronous agent_id + tool + args flow:

res = client.ai.run_tool(
    agent_id="support_bot",
    tool="send_email",
    args={
        "subject": "Hello",
        "run_id": "run_support_001",
    },
    spend_usd=0.001,
)
if res.allowed:
    print(res.decision, res.result)
else:
    print(res.decision, res.policy_reason)

The SDK also wraps the low-level lease endpoints directly:

lease = client.ai.lease(key="support_chat:abc123", ttl=1800, metadata={"chat_id": "abc123"})
status = client.ai.status("support_chat:abc123")
result = client.ai.result("support_chat:abc123")
client.ai.extend(key="support_chat:abc123", lease_id=lease["lease_id"], ttl=1800)
client.ai.complete(key="support_chat:abc123", lease_id=lease["lease_id"], result={"status": "done"})
# or
client.ai.fail(key="support_chat:abc123", lease_id=lease["lease_id"], error_code="chat_failed")

policy = client.gov.upsert_policy({
    "agent_id": "support_bot",
    "allowed_tools": ["send_email", "create_ticket"],
    "blocked_tools": ["delete_user"],
    "max_actions_per_hour": 100,
    "max_spend_usd_per_day": 50.0,
})
print(policy.agent_id, policy.max_actions_per_hour)

See also: Policy Templates


import os

tool = client.gov.create_tool({
    "name": "send_email",
    "scope_id": "global",
    "url": "https://your-domain.com/tools/send-email",
    "auth": {"type": "hmac_sha256", "secret": os.environ["TOOL_SECRET"]},
    "timeout_ms": 15000,
    "max_retries": 2,
    "enabled": True,
    "description": "Send transactional email",
})
print(tool["name"], tool["enabled"])

List, fetch, toggle, and delete:

tools = client.gov.list_tools(scope_id="global")
tool = client.gov.get_tool("send_email", scope_id="global")
disabled = client.gov.toggle_tool("send_email", enabled=False, scope_id="global")
deleted = client.gov.delete_tool("send_email", scope_id="global")

See also: Implementing Tool Backends


make_usage = client.usage("make")
ai_usage = client.usage("ai")
all_usage = client.usage_all()
print(make_usage["usage"], make_usage["requests_total_month"])
print(all_usage["ai"]["charged_total_month"])

Structured run events:

event = client.post_event(
    run_id="run_support_001",
    type="tool_call",
    status="start",
    step="step_1",
    tool="send_email",
    agent_id="support_bot",
)
timeline = client.get_run_timeline("run_support_001")
recent_events = client.events(limit=20)
print(event["event_id"])
print(timeline["total"])
print(len(recent_events))

The current SDK wraps:

  • usage() / usage_async()
  • usage_all() / usage_all_async()
  • post_event() / post_event_async()
  • get_run_timeline() / get_run_timeline_async()
  • events() / events_async()

The current REST API also has GET /v1/runs, but there is no dedicated list_runs() helper in the SDK yet.


import os
from onceonly import OnceOnly

async def main():
    async with OnceOnly(api_key=os.environ["ONCEONLY_API_KEY"]) as client:
        lock = await client.check_lock_async(
            key="payment:invoice:INV-123",
            ttl=3600,
            meta={"invoice_id": "INV-123"},
        )
        tool = await client.gov.create_tool_async({
            "name": "send_email",
            "scope_id": "global",
            "url": "https://your-domain.com/tools/send-email",
            "auth": {"type": "hmac_sha256", "secret": os.environ["TOOL_SECRET"]},
            "timeout_ms": 15000,
            "max_retries": 2,
        })
        final = await client.ai.run_and_wait_async(
            key="support_chat:abc123",
            ttl=1800,
            metadata={
                "run_id": "run_support_001",
                "agent_id": "support_bot",
                "actions": [
                    {
                        "tool": "send_email",
                        "args": {"to": "user@example.com", "subject": "Resolved"},
                        "spend_usd": 0.001,
                    }
                ],
            },
            timeout=120,
        )
        usage = await client.usage_all_async()
        timeline = await client.get_run_timeline_async("run_support_001")
        print(lock.duplicate, tool["name"], final.status, usage["plan"], timeline["total"])

Common async helpers include:

  • check_lock_async()
  • usage_async() / usage_all_async()
  • events_async() / post_event_async() / get_run_timeline_async()
  • client.ai.run_async() / run_and_wait_async() / run_tool_async()
  • client.ai.lease_async() / extend_async() / complete_async() / fail_async()
  • client.gov.create_tool_async() / list_tools_async() / get_tool_async() / toggle_tool_async() / delete_tool_async()

The package exports two decorators from onceonly:

  • @idempotent(...) for check-lock
  • @idempotent_ai(...) for exactly-once local execution with AI leases
from onceonly import OnceOnly, idempotent

client = OnceOnly(api_key="once_live_xxx")

@idempotent(
    client,
    key_prefix="payments",
    ttl=3600,
    on_duplicate=lambda invoice_id: {"status": "duplicate", "invoice_id": invoice_id},
)
def charge_invoice(invoice_id: str) -> dict:
    return {"status": "paid", "invoice_id": invoice_id}

from onceonly import OnceOnly, idempotent_ai

client = OnceOnly(api_key="once_live_xxx")

@idempotent_ai(
    client,
    key_fn=lambda chat_id, messages: f"support_chat:{chat_id}",
    ttl=1800,
    metadata_fn=lambda chat_id, messages: {"chat_id": chat_id, "message_count": len(messages)},
)
def handle_support_chat(chat_id: str, messages: list[dict]) -> dict:
    return {"status": "resolved", "messages": len(messages)}

result = handle_support_chat("chat_123", [{"role": "user", "content": "Help"}])
print(result.status, result.result)

idempotent_ai returns an AiResult, not the raw function output, because completion is normalized through the lease/result flow.


See also: AI Run API | Runs & Events API | Usage API