import asyncio

from nebula_sdk import create_agent


async def basic_chat() -> None:
    """Create a simple chat agent, ask one question, and print the reply.

    Demonstrates the minimal lifecycle: create_agent -> init ->
    set_system_prompt -> ask.
    """
    agent = await create_agent({
        'name': 'Chat Assistant',
        'provider_address': '0xf07240Efa67755B5311bc75784a061eDB47165Dd',
        'memory_bucket': 'chat-memory',
        # NOTE(review): never hard-code a real key — load from an env var
        # or secrets manager in production.
        'private_key': 'your-private-key',
    })
    await agent.init()

    # Set system prompt for the agent
    agent.set_system_prompt('You are a helpful AI assistant.')

    # Ask a question
    response = await agent.ask('Hello, how can you help me today?')
    print(response)


if __name__ == '__main__':
    asyncio.run(basic_chat())
Build an agent that maintains conversation context:
import asyncio

from nebula_sdk import create_agent


async def contextual_agent() -> None:
    """Run a multi-turn conversation that keeps context across turns.

    Uses `chat_with_context` (rather than `ask`) so each reply can
    reference earlier turns, then persists the session with
    `save_conversation`.
    """
    agent = await create_agent({
        'name': 'Contextual Assistant',
        'provider_address': '0xf07240Efa67755B5311bc75784a061eDB47165Dd',
        'memory_bucket': 'contextual-memory',
        # NOTE(review): load from env/secrets manager in production.
        'private_key': 'your-private-key',
    })
    await agent.init()

    # Set up the agent's behavior
    agent.set_system_prompt('''
    You are a helpful programming assistant.
    When users ask questions:
    1. Remember the context of our conversation
    2. Provide clear, practical examples
    3. Ask follow-up questions when needed
    4. Reference previous topics when relevant
    ''')

    # Have a multi-turn conversation
    print("=== Conversation Start ===")

    response1 = await agent.chat_with_context('I want to learn Python')
    print(f"Agent: {response1}")

    response2 = await agent.chat_with_context('What should I start with?')
    print(f"Agent: {response2}")

    response3 = await agent.chat_with_context('Can you show me an example?')
    print(f"Agent: {response3}")

    # Save the conversation for future reference
    conversation_id = await agent.save_conversation('python_learning_session')
    print(f"\nConversation saved with ID: {conversation_id}")


if __name__ == '__main__':
    asyncio.run(contextual_agent())
import asyncio

from nebula_sdk import create_agent


async def advanced_agent() -> None:
    """Configure an agent with advanced options and inspect its stats.

    Shows the extended `create_agent` config (custom RPC endpoints,
    memory depth, sampling parameters), asks a complex question, and
    prints a few fields from `agent.get_stats()`.
    """
    agent = await create_agent({
        'name': 'Advanced Assistant',
        'provider_address': '0x3feE5a4dd5FDb8a32dDA97Bed899830605dBD9D3',  # deepseek-r1-70b for reasoning
        'memory_bucket': 'advanced-memory',
        # NOTE(review): load from env/secrets manager in production.
        'private_key': 'your-private-key',
        'rpc_url': 'https://evmrpc-testnet.0g.ai',
        'indexer_rpc': 'https://indexer-storage-testnet-turbo.0g.ai',
        'kv_rpc': 'http://3.101.147.150:6789',
        'max_ephemeral_messages': 100,  # Keep more conversation history
        'temperature': 0.3,  # More focused responses
        'max_tokens': 2000,  # Longer responses
    })
    await agent.init()

    # Configure for complex reasoning tasks
    agent.set_system_prompt('''
    You are an advanced AI assistant specialized in problem-solving and analysis.
    When approaching problems:
    1. Break them down into smaller components
    2. Consider multiple perspectives
    3. Show your reasoning process
    4. Provide actionable recommendations
    ''')

    # Test with a complex question
    response = await agent.ask('''
    I'm building a web application that needs to handle 10,000 concurrent users.
    What architecture considerations should I keep in mind, and what technologies
    would you recommend for the backend, database, and deployment?
    ''')
    print(response)

    # Get agent statistics
    stats = agent.get_stats()
    print(f"\nAgent Stats:")
    print(f"- Name: {stats['name']}")
    print(f"- Messages in memory: {stats['memory']['ephemeral_messages']}")
    print(f"- Temperature: {stats['chat']['temperature']}")
    print(f"- Max tokens: {stats['chat']['max_tokens']}")


if __name__ == '__main__':
    asyncio.run(advanced_agent())