#!/usr/bin/env python3
"""
Example usage of the prompt templates and test suite with the
Advanced Multi-Model Orchestrator.
"""

import asyncio
import sys
from pathlib import Path

# Add the current directory to the import path
sys.path.append(str(Path(__file__).parent))

from prompt_template import PromptTemplates, TaskType, TestPrompt
from test_suite import OrchestratorTester, TestRunner


async def example_with_advanced_orchestrator():
    """Example using the Advanced Multi-Model Orchestrator."""
    print("🚀 Example: Testing with Advanced Orchestrator")
    print("=" * 60)

    try:
        # Import the advanced orchestrator
        from advanced_multi_model_orchestrator_complete import AdvancedMultiModelOrchestrator

        # Initialize the orchestrator
        print("📦 Initializing Advanced Multi-Model Orchestrator...")
        orchestrator = AdvancedMultiModelOrchestrator(parent_model_name="distilgpt2")
        print("✅ Orchestrator initialized successfully!")

        # Example 1: Test a few specific prompts
        print("\n📝 Example 1: Testing Specific Prompts")
        print("-" * 40)

        test_prompts = [
            "What is machine learning?",
            "Generate an image of a peaceful forest",
            "Describe this image of a sunset",
            "Explain step by step how neural networks work",
        ]

        for i, prompt in enumerate(test_prompts, 1):
            print(f"\n{i}. Testing: {prompt}")
            result = await orchestrator.process_request(prompt)
            print(f"   Task Type: {result.task_type.value}")
            print(f"   Confidence: {result.confidence:.2f}")
            print(f"   Success: {result.success}")
            if result.output:
                print(f"   Output: {result.output[:100]}...")

        # Example 2: Use prompt templates
        print("\n📋 Example 2: Using Prompt Templates")
        print("-" * 40)

        # Get some text-processing prompts
        text_prompts = PromptTemplates.get_prompts_by_task_type(TaskType.TEXT)[:3]

        for prompt in text_prompts:
            print(f"\nTesting: {prompt.prompt}")
            result = await orchestrator.process_request(prompt.prompt)
            expected = prompt.expected_task.value
            actual = result.task_type.value
            correct = "✅" if expected == actual else "❌"
            print(f"   Expected: {expected} | Actual: {actual} {correct}")
            print(f"   Confidence: {result.confidence:.2f}")

        # Example 3: Run a quick test suite
        print("\n🧪 Example 3: Running Quick Test Suite")
        print("-" * 40)

        tester = OrchestratorTester(orchestrator)

        # Test basic functionality
        basic_result = await tester.run_basic_tests()
        print("Basic Test Results:")
        print(f"   Total Tests: {basic_result.total_tests}")
        print(f"   Accuracy: {basic_result.accuracy:.1%}")
        print(f"   Avg Confidence: {basic_result.average_confidence:.2f}")

        return True

    except ImportError as e:
        print(f"❌ Could not import AdvancedMultiModelOrchestrator: {e}")
        print("Make sure the orchestrator file is available.")
        return False
    except Exception as e:
        print(f"❌ Error: {e}")
        return False


async def example_with_demo_orchestrator():
    """Example using the Demo Orchestrator."""
    print("🎮 Example: Testing with Demo Orchestrator")
    print("=" * 60)

    try:
        # Import the demo orchestrator
        from advanced_multi_model_orchestrator_complete import DemoAdvancedOrchestrator

        # Initialize the demo orchestrator
        print("📦 Initializing Demo Orchestrator...")
        orchestrator = DemoAdvancedOrchestrator()
        print("✅ Demo orchestrator initialized!")

        # Test with different types of prompts
        print("\n📝 Testing Different Prompt Types")
        print("-" * 40)

        # Get one prompt from each category
        categories = [
            TaskType.TEXT,
            TaskType.CAPTION,
            TaskType.TEXT2IMG,
            TaskType.MULTIMODAL,
            TaskType.REASONING,
        ]

        for category in categories:
            prompts = PromptTemplates.get_prompts_by_task_type(category)
            if prompts:
                prompt = prompts[0]
                print(f"\nTesting {category.value}: {prompt.prompt[:50]}...")
                result = await orchestrator.process_request(prompt.prompt)
                print(f"   Result: {result.task_type.value}")
                print(f"   Confidence: {result.confidence:.2f}")

        return True

    except ImportError as e:
        print(f"❌ Could not import DemoAdvancedOrchestrator: {e}")
        return False
    except Exception as e:
        print(f"❌ Error: {e}")
        return False


async def example_custom_testing():
    """Example of custom testing scenarios."""
    print("🔧 Example: Custom Testing Scenarios")
    print("=" * 60)

    # Create a mock orchestrator for demonstration
    class MockOrchestrator:
        async def process_request(self, prompt):
            class MockResult:
                def __init__(self, prompt):
                    # Simple keyword-based routing logic
                    if "image" in prompt.lower() or "generate" in prompt.lower():
                        task_type = "TEXT2IMG"
                    elif "describe" in prompt.lower() or "caption" in prompt.lower():
                        task_type = "CAPTION"
                    elif "explain" in prompt.lower() or "analyze" in prompt.lower():
                        task_type = "REASONING"
                    else:
                        task_type = "TEXT"

                    self.task_type = type('TaskType', (), {'value': task_type})()
                    self.confidence = 0.7
                    self.success = True
                    self.output = f"Mock response: {prompt[:30]}..."
                    self.error_message = None

            return MockResult(prompt)

    orchestrator = MockOrchestrator()

    # Example 1: Test specific categories
    print("\n📊 Example 1: Testing by Category")
    print("-" * 40)

    categories = ["education", "creative", "practical"]
    for category in categories:
        prompts = PromptTemplates.get_prompts_by_category(category)[:2]
        print(f"\nTesting {category} category ({len(prompts)} prompts):")
        for prompt in prompts:
            result = await orchestrator.process_request(prompt.prompt)
            print(f"   {prompt.prompt[:40]:<40} -> {result.task_type.value}")

    # Example 2: Performance testing
    print("\n⚡ Example 2: Performance Testing")
    print("-" * 40)

    import time
    from prompt_template import SpecializedPrompts

    performance_prompts = SpecializedPrompts.PERFORMANCE_PROMPTS
    print(f"Testing {len(performance_prompts)} performance prompts:")

    total_time = 0
    for prompt in performance_prompts:
        start_time = time.time()
        result = await orchestrator.process_request(prompt)
        processing_time = time.time() - start_time
        total_time += processing_time
        print(f"   {prompt[:30]:<30} | {processing_time:.3f}s")

    avg_time = total_time / len(performance_prompts)
    print(f"\nAverage processing time: {avg_time:.3f}s")

    # Example 3: Custom test scenarios
    print("\n🎯 Example 3: Custom Test Scenarios")
    print("-" * 40)

    # Create custom test prompts
    custom_prompts = [
        TestPrompt(
            prompt="Analyze the impact of AI on healthcare",
            expected_task=TaskType.REASONING,
            description="Healthcare AI analysis",
            category="healthcare"
        ),
        TestPrompt(
            prompt="Create a visual representation of data flow",
            expected_task=TaskType.TEXT2IMG,
            description="Data visualization",
            category="business"
        ),
        TestPrompt(
            prompt="Summarize the latest AI research findings",
            expected_task=TaskType.TEXT,
            description="Research summary",
            category="research"
        ),
    ]

    tester = OrchestratorTester(orchestrator)

    print("Testing custom scenarios:")
    for prompt in custom_prompts:
        result = await tester.test_single_prompt(prompt)
        status = "✅" if result.task_correct else "❌"
        print(f"   {status} {prompt.description}: {result.actual_task} (expected: {prompt.expected_task.value})")


async def example_prompt_generation():
    """Example of prompt generation utilities."""
    print("🔄 Example: Prompt Generation Utilities")
    print("=" * 60)

    # Example 1: Generate variations
    print("\n📝 Example 1: Prompt Variations")
    print("-" * 40)

    base_prompt = "explain machine learning"
    from prompt_template import PromptGenerator

    variations = PromptGenerator.generate_variations(base_prompt, 3)
    for i, variation in enumerate(variations, 1):
        print(f"   {i}. {variation}")

    # Example 2: Contextual prompts
    print("\n🎯 Example 2: Contextual Prompts")
    print("-" * 40)

    contexts = ["education", "business", "research"]
    for context in contexts:
        contextual_prompts = PromptGenerator.generate_contextual_prompts(context)
        print(f"\n{context.title()} context prompts:")
        for prompt in contextual_prompts:
            print(f"   - {prompt.prompt}")

    # Example 3: Statistics
    print("\n📊 Example 3: Prompt Statistics")
    print("-" * 40)

    stats = PromptTemplates.get_prompt_statistics()
    print(f"Total prompts: {stats['total_prompts']}")
    print(f"By task type: {stats['by_task_type']}")
    print(f"Confidence ranges: {stats['confidence_ranges']}")


def main():
    """Main entry point for running the examples."""
    print("🚀 Advanced Multi-Model Orchestrator - Example Usage")
    print("=" * 70)

    if len(sys.argv) > 1:
        example_type = sys.argv[1].lower()
    else:
        print("Available examples:")
        print("  1. advanced - Test with Advanced Orchestrator")
        print("  2. demo     - Test with Demo Orchestrator")
        print("  3. custom   - Custom testing scenarios")
        print("  4. prompts  - Prompt generation utilities")
        print()
        example_type = input("Enter example type (1-4 or name): ").strip().lower()

    # Map input to example names
    examples = {
        '1': 'advanced', '2': 'demo', '3': 'custom', '4': 'prompts',
        'advanced': 'advanced', 'demo': 'demo',
        'custom': 'custom', 'prompts': 'prompts'
    }
    example_type = examples.get(example_type, 'custom')

    try:
        if example_type == 'advanced':
            success = asyncio.run(example_with_advanced_orchestrator())
        elif example_type == 'demo':
            success = asyncio.run(example_with_demo_orchestrator())
        elif example_type == 'custom':
            asyncio.run(example_custom_testing())
            success = True
        elif example_type == 'prompts':
            asyncio.run(example_prompt_generation())
            success = True
        else:
            print("❌ Unknown example type. Running custom example...")
            asyncio.run(example_custom_testing())
            success = True

        if success:
            print("\n✅ Example completed successfully!")
        else:
            print("\n❌ Example failed. Check the error messages above.")

    except KeyboardInterrupt:
        print("\n\n⏹️ Example interrupted by user")
    except Exception as e:
        print(f"\n❌ Error running example: {e}")


if __name__ == "__main__":
    main()
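
# ---------------------------------------------------------------------------
# Usage sketch, assuming this file is saved as example_usage.py (the file name
# is an assumption) in the same directory as prompt_template.py, test_suite.py,
# and advanced_multi_model_orchestrator_complete.py. main() above can be driven
# from the command line or interactively:
#
#     python example_usage.py advanced   # full orchestrator (loads distilgpt2)
#     python example_usage.py demo       # lightweight demo orchestrator
#     python example_usage.py custom     # mock-based custom test scenarios
#     python example_usage.py prompts    # prompt-generation utilities
#
#     python example_usage.py            # no argument: interactive menu (1-4)
#
# Unrecognized selections fall back to the custom example, per the mapping in
# main().
# ---------------------------------------------------------------------------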