- Unified access to 4 major LLM providers (Gemini, OpenAI, Anthropic, Grok)
- Real-time streaming support across all providers
- Multimodal capabilities (text, images, audio)
- Intelligent document processing with smart chunking
- Production-ready with health monitoring and error handling
- Full OpenAI ecosystem integration (Assistants, DALL-E, Whisper)
- Vector embeddings and semantic similarity
- Session-based API key management
- Built with FastMCP and modern Python tooling

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

#!/usr/bin/env python3
"""Test streaming functionality directly."""

import base64
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
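
# NOTE: these tests call Gemini through Google's OpenAI-compatible endpoint,
# so the stock `openai` client works unchanged once base_url points at it.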


def test_text_streaming():
    """Test streaming text generation."""
    print("Testing text streaming...")
    print("=" * 50)

    client = OpenAI(
        api_key=os.getenv("GOOGLE_API_KEY"),
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
    )

    stream = client.chat.completions.create(
        model="gemini-1.5-flash",
        messages=[
            {"role": "user", "content": "Write a short poem about coding"}
        ],
        stream=True
    )

    # Each chunk carries an incremental delta; content is None on
    # housekeeping chunks (e.g. the final one with finish_reason set).
    full_text = ""
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            content = chunk.choices[0].delta.content
            full_text += content
            print(content, end="", flush=True)

    print("\n" + "=" * 50)
    print("Text streaming completed!")
    print(f"Full text length: {len(full_text)}")
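

# Both tests consume the stream with the same loop; a shared helper along
# these lines (a sketch, not used by the original tests) would remove the
# duplication:
def collect_stream(stream) -> str:
    """Print streamed deltas as they arrive and return the full text."""
    full_text = ""
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content is not None:
            full_text += content
            print(content, end="", flush=True)
    return full_text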


def test_image_analysis():
    """Test image analysis (if image exists)."""
    print("\nTesting image analysis...")
    print("=" * 50)

    # Path to a test image (replace with an actual image to exercise this test)
    image_path = "test_image.jpg"

    if not os.path.exists(image_path):
        print(f"No test image found at {image_path}, skipping image test")
        return

    client = OpenAI(
        api_key=os.getenv("GOOGLE_API_KEY"),
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
    )

    # Encode the image as base64 for inline transport
    with open(image_path, "rb") as image_file:
        base64_image = base64.b64encode(image_file.read()).decode("utf-8")
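
    # The request below follows the OpenAI vision format: the image travels
    # inline as a base64 data: URL alongside the text prompt.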
    stream = client.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is in this image?"},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        stream=True
    )

    full_text = ""
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            content = chunk.choices[0].delta.content
            full_text += content
            print(content, end="", flush=True)

    print("\n" + "=" * 50)
    print("Image analysis completed!")
    print(f"Full text length: {len(full_text)}")


if __name__ == "__main__":
    if not os.getenv("GOOGLE_API_KEY"):
        print("Please set GOOGLE_API_KEY environment variable")
        exit(1)

    test_text_streaming()
    test_image_analysis()
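
# Usage (script name assumed): provide GOOGLE_API_KEY via .env or the shell,
# e.g.  GOOGLE_API_KEY=... python test_streaming.py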