Skip to main content

Python Client

Official Python SDK for the Nebula Memory API.

Installation

pip install nebula-client

Setup

# Set the API key in your shell environment (bash/zsh)
export NEBULA_API_KEY="your_api_key_here"
from nebula import Nebula

# No explicit key argument needed: the client reads the NEBULA_API_KEY environment variable
nebula = Nebula()

Basic Usage

from nebula import Nebula

nebula = Nebula()

# Make a collection to hold related memories
collection = nebula.create_collection(name="my_notes", description="Personal notes")

# Write a single memory into that collection
memory = {
    "collection_id": collection.id,
    "content": "Python is great for data science",
    "metadata": {"topic": "programming", "language": "python"},
}
memory_id = nebula.store_memory(memory)

# Query the collection semantically
results = nebula.search(query="data science", collection_ids=[collection.id], limit=10)

for result in results:
    print(f"Score: {result.score:.2f}")
    print(f"Content: {result.content}")
cURL

The same operations, called directly against the REST API:
# Create a collection
# POST /v1/collections — name/description mirror the Python SDK example above
curl -X POST "https://api.nebulacloud.app/v1/collections" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{"name": "my_notes", "description": "Personal notes"}'

# Store a memory
# NOTE(review): the REST payload uses collection_ref/engram_type/raw_text while the
# Python SDK example uses collection_id/content — confirm which field names are current.
curl -X POST "https://api.nebulacloud.app/v1/memories" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{
    "collection_ref": "my_notes",
    "engram_type": "document",
    "raw_text": "Python is great for data science",
    "metadata": {"topic": "programming", "language": "python"}
  }'

# Search memories
# NOTE(review): collection_ids here carries the collection *name* ("my_notes"),
# whereas the SDK example passes collection.id — verify the API accepts both.
curl -X POST "https://api.nebulacloud.app/v1/retrieval/search" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{
    "query": "data science",
    "collection_ids": ["my_notes"],
    "limit": 10
  }'

Document Upload

# Upload raw text as a document
doc_id = nebula.create_document_text(
    collection_ref="my-collection",
    raw_text="Machine learning is a subset of AI...",
    metadata={"title": "ML Intro"}
)

# Upload from a local file
# Explicit encoding avoids platform-dependent decoding (the locale default varies by OS)
with open("doc.txt", "r", encoding="utf-8") as f:
    doc_id = nebula.create_document_text(
        collection_ref="my-collection",
        raw_text=f.read()
    )

# Upload content that is already split into chunks
doc_id = nebula.create_document_chunks(
    collection_ref="my-collection",
    chunks=["Chunk 1", "Chunk 2", "Chunk 3"]
)

Async Support

The Python SDK includes a dedicated async client:
import asyncio
from nebula import AsyncNebula

async def main():
    """Run the create/store/search flow with the dedicated async client."""
    # AsyncNebula() reads the NEBULA_API_KEY environment variable, like Nebula()
    nebula = AsyncNebula()
    try:
        # Every SDK operation is awaitable
        collection = await nebula.create_collection(name="async_notes", description="Async example")

        memory_id = await nebula.store_memory({
            "collection_id": collection.id,
            "content": "Async operations are fast",
            "metadata": {"type": "example"}
        })

        results = await nebula.search(
            query="async",
            collection_ids=[collection.id]
        )

        for result in results:
            print(f"{result.content}")
    finally:
        # Close the client even if a call above raises, so the HTTP session is released
        await nebula.aclose()

asyncio.run(main())

Async Context Manager

import asyncio
from nebula import AsyncNebula

async def main():
    """Demonstrate automatic cleanup via the async context manager."""
    # Exiting the `async with` block closes the client — no explicit aclose() needed
    async with AsyncNebula() as nebula:
        collection = await nebula.create_collection(name="notes", description="My notes")

        memory = {
            "collection_id": collection.id,
            "content": "Context managers are clean",
        }
        memory_id = await nebula.store_memory(memory)

        results = await nebula.search(query="clean", collection_ids=[collection.id])

        print(f"Found {len(results)} results")

asyncio.run(main())

Next Steps

For detailed examples and advanced features, see: