Created
February 2, 2026 15:37
-
-
Save rpappalax/473e9494d65f778d9dccfa458b0fe7aa to your computer and use it in GitHub Desktop.
app/llm/gemini_client.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from __future__ import annotations

from vertexai.preview.generative_models import GenerativeModel, Part

from app.common.config import Settings
from app.llm.vertex_init import init_vertex


def generate(
    settings: Settings,
    parts: list[Part] | Part,
    *,
    temperature: float = 0.3,
    max_output_tokens: int = 1024,
) -> str:
    """Generate text using a Gemini model on Vertex AI.

    Args:
        settings: Application settings; must expose ``gemini_model`` and
            whatever ``init_vertex`` reads to initialise the SDK.
        parts: Either a single ``Part`` (text or image) or a list of
            ``Part`` objects forming a multimodal prompt.
        temperature: Sampling temperature forwarded to the model.
        max_output_tokens: Upper bound on the number of generated tokens.

    Returns:
        The generated text, or ``""`` when the response contains no
        usable text (e.g. the candidate was blocked by safety filters).
    """
    init_vertex(settings)
    model = GenerativeModel(settings.gemini_model)
    response = model.generate_content(
        parts,
        generation_config={
            "temperature": temperature,
            "max_output_tokens": max_output_tokens,
        },
    )
    # `response.text` is a convenience property, but it RAISES ValueError
    # when the response has no text parts (e.g. safety-blocked). The
    # original `getattr(response, "text", "")` only swallowed
    # AttributeError, so that failure mode still propagated — guard both.
    try:
        return response.text or ""
    except (ValueError, AttributeError):
        return ""
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment