import pytest
from unittest.mock import Mock, patch
import json
import os

from . import oai
from .oai import LLMClient, prepare_openai, post_process_response, run_cached_request, arun_cached_request

# Lightweight stand-ins for the pieces of the OpenAI v1 client surface these
# tests touch: chat.completions, embeddings, moderations, and the default
# headers/query attributes the tests below assert on.
class MockOpenAIResponse:
    def dict(self):
        return {"response": "test"}

class MockRateLimitError(Exception):
    pass

class MockCompletions:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockChat:
    def __init__(self):
        self.completions = MockCompletions()

class MockEmbeddings:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockModerations:
    def create(self, **kwargs):
        return MockOpenAIResponse()

class MockOpenAI:
    def __init__(self, **kwargs):
        self.default_headers = kwargs.get('default_headers', {})
        self.default_query = kwargs.get('default_query', {})
        self.chat = MockChat()
        self.embeddings = MockEmbeddings()
        self.moderations = MockModerations()
        self.RateLimitError = MockRateLimitError

def test_openai_sync():
    """Test basic OpenAI client functionality with a simple completion request"""
    mock_openai = MockOpenAI()
    client = LLMClient(
        openai=mock_openai,
        complete=mock_openai.chat.completions.create,
        embed=mock_openai.embeddings.create,
        moderation=mock_openai.moderations.create,
        RateLimitError=MockRateLimitError
    )

    response = run_cached_request(
        client=client,
        request_type="complete",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "What is 2+2?"
            }
        ],
        model="gpt-3.5-turbo",
        max_tokens=50
    )

    assert response == {"response": "test"}

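# A hedged sketch, not part of the original suite: exercise the async path via
# arun_cached_request, assuming it mirrors run_cached_request's keyword
# signature and awaits the client's callables. Requires pytest-asyncio (or an
# equivalent plugin) for the asyncio marker.
class MockAsyncCompletions:
    async def create(self, **kwargs):
        # Same canned payload as the sync mock, returned from a coroutine.
        return MockOpenAIResponse()

@pytest.mark.asyncio
async def test_openai_async_sketch():
    """Sketch: basic async completion request through arun_cached_request"""
    mock_openai = MockOpenAI()
    client = LLMClient(
        openai=mock_openai,
        complete=MockAsyncCompletions().create,
        embed=mock_openai.embeddings.create,
        moderation=mock_openai.moderations.create,
        RateLimitError=MockRateLimitError
    )

    response = await arun_cached_request(
        client=client,
        request_type="complete",
        messages=[
            {
                "role": "user",
                "content": "What is 2+2?"
            }
        ],
        model="gpt-3.5-turbo",
        max_tokens=50
    )

    assert response == {"response": "test"}
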
@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_openai_headers(mock_openai):
    """Test OpenAI client with custom headers"""
    mock_instance = MockOpenAI(default_headers={"X-Custom-Header": "test", "X-Request-Source": "autoevals"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_HEADERS': json.dumps({"X-Custom-Header": "test"})}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_headers["X-Custom-Header"] == "test"
        assert mock_instance.default_headers["X-Request-Source"] == "autoevals"

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_openai_query_params(mock_openai):
    """Test OpenAI client with custom query parameters"""
    mock_instance = MockOpenAI(default_query={"custom_param": "test"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_QUERY': json.dumps({"custom_param": "test"})}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_query["custom_param"] == "test"

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_invalid_header_json(mock_openai):
    """Test handling of invalid header JSON"""
    mock_instance = MockOpenAI(default_headers={"X-Request-Source": "autoevals"})
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_HEADERS': 'invalid json'}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert mock_instance.default_headers["X-Request-Source"] == "autoevals"
        assert len(mock_instance.default_headers) == 1

@patch('openai.OpenAI')
@patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'})
def test_invalid_query_json(mock_openai):
    """Test handling of invalid query JSON"""
    mock_instance = MockOpenAI()
    mock_openai.return_value = mock_instance
    with patch.dict(os.environ, {'OPENAI_DEFAULT_QUERY': 'invalid json'}):
        client, wrapped = prepare_openai()
        assert isinstance(client, LLMClient)
        assert len(mock_instance.default_query) == 0

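# A hedged sketch, also not in the original suite: the sync test above implies
# that response objects exposing .dict() are unwrapped to plain dicts, so
# post_process_response is assumed here to perform that conversion.
def test_post_process_response_sketch():
    """Sketch: post_process_response is assumed to unwrap objects exposing .dict()"""
    assert post_process_response(MockOpenAIResponse()) == {"response": "test"}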