diff --git a/pyproject.toml b/pyproject.toml index ff52e089a..f4393c0c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "uipath" -version = "2.6.22" +version = "2.6.23" description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools." readme = { file = "README.md", content-type = "text/markdown" } requires-python = ">=3.11" diff --git a/src/uipath/platform/_uipath.py b/src/uipath/platform/_uipath.py index 4c99c6037..863ee98d7 100644 --- a/src/uipath/platform/_uipath.py +++ b/src/uipath/platform/_uipath.py @@ -6,7 +6,13 @@ from .._utils._auth import resolve_config_from_env from .action_center import TasksService from .agenthub._agenthub_service import AgentHubService -from .chat import ConversationsService, UiPathLlmChatService, UiPathOpenAIService +from .chat import ( + ConversationsService, + UiPathBedrockService, + UiPathLlmChatService, + UiPathOpenAIService, + UiPathVertexService, +) from .common import ( ApiClient, ExternalApplicationService, @@ -133,6 +139,14 @@ def llm_openai(self) -> UiPathOpenAIService: def llm(self) -> UiPathLlmChatService: return UiPathLlmChatService(self._config, self._execution_context) + @property + def llm_vertex(self) -> UiPathVertexService: + return UiPathVertexService(self._config, self._execution_context) + + @property + def llm_bedrock(self) -> UiPathBedrockService: + return UiPathBedrockService(self._config, self._execution_context) + @property def entities(self) -> EntitiesService: return EntitiesService(self._config, self._execution_context) diff --git a/src/uipath/platform/chat/__init__.py b/src/uipath/platform/chat/__init__.py index 8aa491606..98929167d 100644 --- a/src/uipath/platform/chat/__init__.py +++ b/src/uipath/platform/chat/__init__.py @@ -7,13 +7,19 @@ from ._conversations_service import ConversationsService from ._llm_gateway_service import ( + APIFlavor, + BedrockModels, ChatModels, 
EmbeddingModels, + GeminiModels, + UiPathBedrockService, UiPathLlmChatService, UiPathOpenAIService, + UiPathVertexService, ) from .llm_gateway import ( AutoToolChoice, + BedrockCompletion, ChatCompletion, ChatCompletionChoice, ChatCompletionUsage, @@ -29,6 +35,7 @@ ToolFunctionDefinition, ToolParametersDefinition, ToolPropertyDefinition, + VertexCompletion, ) from .llm_throttle import get_llm_semaphore, set_llm_concurrency @@ -36,10 +43,15 @@ # Conversations Service "ConversationsService", # LLM Gateway Services + "APIFlavor", + "BedrockModels", "ChatModels", "EmbeddingModels", + "GeminiModels", + "UiPathBedrockService", "UiPathLlmChatService", "UiPathOpenAIService", + "UiPathVertexService", # LLM Throttling "get_llm_semaphore", "set_llm_concurrency", @@ -55,6 +67,8 @@ "ChatCompletionChoice", "ChatCompletionUsage", "ChatCompletion", + "VertexCompletion", + "BedrockCompletion", "EmbeddingItem", "EmbeddingUsage", "TextEmbedding", diff --git a/src/uipath/platform/chat/_llm_gateway_service.py b/src/uipath/platform/chat/_llm_gateway_service.py index d75fea498..973a0da0f 100644 --- a/src/uipath/platform/chat/_llm_gateway_service.py +++ b/src/uipath/platform/chat/_llm_gateway_service.py @@ -16,6 +16,7 @@ UiPathLlmChatService: Service using UiPath's normalized API format """ +from enum import StrEnum from typing import Any from opentelemetry import trace @@ -26,14 +27,29 @@ from ...utils import EndpointManager from ..common import BaseService, UiPathApiConfig, UiPathExecutionContext from .llm_gateway import ( + BedrockCompletion, ChatCompletion, SpecificToolChoice, TextEmbedding, ToolChoice, ToolDefinition, + VertexCompletion, ) from .llm_throttle import get_llm_semaphore + +class APIFlavor(StrEnum): + """API flavor for LLM communication.""" + + AUTO = "auto" + OPENAI_RESPONSES = "OpenAIResponses" + OPENAI_COMPLETIONS = "OpenAiChatCompletions" + AWS_BEDROCK_CONVERSE = "AwsBedrockConverse" + AWS_BEDROCK_INVOKE = "AwsBedrockInvoke" + VERTEX_GEMINI_GENERATE_CONTENT = 
"GeminiGenerateContent" + VERTEX_ANTHROPIC_CLAUDE = "AnthropicClaude" + + # Common constants API_VERSION = "2024-10-21" # Standard API version for OpenAI-compatible endpoints NORMALIZED_API_VERSION = ( @@ -78,6 +94,32 @@ class EmbeddingModels(object): text_embedding_ada_002 = "text-embedding-ada-002" +class GeminiModels(object): + """Available Google Gemini models for Vertex AI. + + This class provides constants for the supported Gemini models that can be used + with UiPathVertexService. + """ + + gemini_2_5_pro = "gemini-2.5-pro" + gemini_2_5_flash = "gemini-2.5-flash" + gemini_2_0_flash_001 = "gemini-2.0-flash-001" + gemini_3_pro_preview = "gemini-3-pro-preview" + + +class BedrockModels(object): + """Available AWS Bedrock models. + + This class provides constants for the supported Bedrock models that can be used + with UiPathBedrockService. + """ + + anthropic_claude_3_7_sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0" + anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0" + anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0" + anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0" + + def _cleanup_schema(schema: dict[str, Any]) -> dict[str, Any]: """Clean up a JSON schema for use with LLM Gateway. @@ -212,6 +254,8 @@ async def chat_completions( temperature: float = 0, response_format: dict[str, Any] | type[BaseModel] | None = None, api_version: str = API_VERSION, + api_flavor: APIFlavor = APIFlavor.AUTO, + vendor: str = "openai", ): """Generate chat completions using UiPath's LLM Gateway service. @@ -238,6 +282,12 @@ async def chat_completions( - A Pydantic BaseModel class (automatically converted to JSON schema) Used to enable JSON mode or other structured outputs. Defaults to None. api_version (str, optional): The API version to use. Defaults to API_VERSION. + api_flavor (APIFlavor, optional): The API flavor to use for the request. + Defaults to APIFlavor.AUTO. 
Available options are: + - APIFlavor.AUTO: Let the gateway auto-detect the flavor + - APIFlavor.OPENAI_COMPLETIONS: Use OpenAI chat completions format + - APIFlavor.OPENAI_RESPONSES: Use OpenAI responses format + vendor (str, optional): The vendor/provider for the model. Defaults to "openai". Returns: ChatCompletion: The chat completion response containing the generated message, @@ -281,6 +331,12 @@ class Country(BaseModel): response_format=Country, # Pass BaseModel directly max_tokens=1000 ) + + # Using a specific API flavor + response = await service.chat_completions( + messages, + api_flavor=APIFlavor.OPENAI_COMPLETIONS + ) ``` Note: @@ -293,8 +349,8 @@ class Country(BaseModel): span.set_attribute("model", model) span.set_attribute("uipath.custom_instrumentation", True) - endpoint = EndpointManager.get_passthrough_endpoint().format( - model=model, api_version=api_version + endpoint = EndpointManager.get_vendor_endpoint().format( + vendor=vendor, model=model ) endpoint = Endpoint("/" + endpoint) @@ -323,13 +379,18 @@ class Country(BaseModel): # Use provided dictionary format directly request_body["response_format"] = response_format + headers = { + **DEFAULT_LLM_HEADERS, + "X-UiPath-LlmGateway-ApiFlavor": api_flavor.value, + } + async with get_llm_semaphore(): response = await self.request_async( "POST", endpoint, json=request_body, - params={"api-version": API_VERSION}, - headers=DEFAULT_LLM_HEADERS, + params={"api-version": api_version}, + headers=headers, ) return ChatCompletion.model_validate(response.json()) @@ -604,3 +665,257 @@ def _convert_tool_to_uipath_format(self, tool: ToolDefinition) -> dict[str, Any] "description": tool.function.description, "parameters": parameters, } + + +class UiPathVertexService(BaseService): + """Service for calling Google Vertex AI models through UiPath's LLM Gateway. + + This service provides access to Google's Gemini models through UiPath's LLM Gateway. 
+ """ + + def __init__( + self, config: UiPathApiConfig, execution_context: UiPathExecutionContext + ) -> None: + super().__init__(config=config, execution_context=execution_context) + + @traced(name="LLM call", run_type="uipath") + async def generate_content( + self, + contents: list[dict[str, Any]], + model: str = GeminiModels.gemini_2_5_flash, + generation_config: dict[str, Any] | None = None, + safety_settings: list[dict[str, Any]] | None = None, + system_instruction: dict[str, Any] | None = None, + tools: list[dict[str, Any]] | None = None, + tool_config: dict[str, Any] | None = None, + api_flavor: APIFlavor = APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT, + ) -> VertexCompletion: + """Generate content using Google Gemini models through UiPath's LLM Gateway. + + This method provides access to Google's Gemini models using the native + Gemini GenerateContent API format. + + Args: + contents (list[dict[str, Any]]): The content to send to the model. + Each item should have 'role' and 'parts' keys, following the + Gemini content format. + model (str, optional): The Gemini model to use. + Defaults to GeminiModels.gemini_2_5_flash. + generation_config (dict[str, Any], optional): Configuration for generation + including temperature, maxOutputTokens, topP, topK, etc. + safety_settings (list[dict[str, Any]], optional): Safety settings to apply. + system_instruction (dict[str, Any], optional): System instruction for the model. + tools (list[dict[str, Any]], optional): Tool definitions for function calling. + tool_config (dict[str, Any], optional): Configuration for tool usage. + api_flavor (APIFlavor, optional): The API flavor to use. + Defaults to APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT. + + Returns: + VertexCompletion: The response from the Gemini API containing + candidates, usage metadata, and other information. 
+ + Examples: + ```python + # Simple text generation + contents = [ + { + "role": "user", + "parts": [{"text": "What is the capital of France?"}] + } + ] + response = await service.generate_content(contents) + + # With generation config + response = await service.generate_content( + contents, + generation_config={ + "temperature": 0.7, + "maxOutputTokens": 1024, + "topP": 0.9 + } + ) + + # With system instruction + response = await service.generate_content( + contents, + system_instruction={ + "parts": [{"text": "You are a helpful assistant."}] + } + ) + ``` + """ + span = trace.get_current_span() + span.set_attribute("model", model) + span.set_attribute("uipath.custom_instrumentation", True) + + endpoint = EndpointManager.get_vendor_endpoint().format( + vendor="vertexai", model=model + ) + endpoint = Endpoint("/" + endpoint) + + request_body: dict[str, Any] = { + "contents": contents, + } + + if generation_config: + request_body["generationConfig"] = generation_config + if safety_settings: + request_body["safetySettings"] = safety_settings + if system_instruction: + request_body["systemInstruction"] = system_instruction + if tools: + request_body["tools"] = tools + if tool_config: + request_body["toolConfig"] = tool_config + + headers = { + **DEFAULT_LLM_HEADERS, + "X-UiPath-LlmGateway-ApiFlavor": api_flavor.value, + } + + async with get_llm_semaphore(): + response = await self.request_async( + "POST", + endpoint, + json=request_body, + headers=headers, + ) + + return VertexCompletion.model_validate(response.json()) + + +class UiPathBedrockService(BaseService): + """Service for calling AWS Bedrock models through UiPath's LLM Gateway. + + This service provides access to AWS Bedrock models through UiPath's LLM Gateway. 
+ """ + + def __init__( + self, config: UiPathApiConfig, execution_context: UiPathExecutionContext + ) -> None: + super().__init__(config=config, execution_context=execution_context) + + @traced(name="LLM call", run_type="uipath") + async def converse( + self, + messages: list[dict[str, Any]], + model: str = BedrockModels.anthropic_claude_haiku_4_5, + system: list[dict[str, Any]] | None = None, + inference_config: dict[str, Any] | None = None, + tool_config: dict[str, Any] | None = None, + guardrail_config: dict[str, Any] | None = None, + additional_model_request_fields: dict[str, Any] | None = None, + api_flavor: APIFlavor = APIFlavor.AWS_BEDROCK_CONVERSE, + ) -> BedrockCompletion: + """Generate responses using AWS Bedrock Converse API through UiPath's LLM Gateway. + + This method provides access to AWS Bedrock models using the Converse API format, + which provides a unified interface for different model providers. + + Args: + messages (list[dict[str, Any]]): The messages to send to the model. + Each message should have 'role' and 'content' keys, following + the Bedrock Converse format. + model (str, optional): The Bedrock model to use. + Defaults to BedrockModels.anthropic_claude_haiku_4_5. + system (list[dict[str, Any]], optional): System prompts for the conversation. + inference_config (dict[str, Any], optional): Inference configuration including + maxTokens, temperature, topP, stopSequences. + tool_config (dict[str, Any], optional): Tool configuration for function calling. + guardrail_config (dict[str, Any], optional): Guardrail configuration. + additional_model_request_fields (dict[str, Any], optional): Additional + model-specific request fields. + api_flavor (APIFlavor, optional): The API flavor to use. + Defaults to APIFlavor.AWS_BEDROCK_CONVERSE. + + Returns: + BedrockCompletion: The response from the Bedrock API. Access the text + content directly via the `text` property. 
+ + Examples: + ```python + # Simple conversation + messages = [ + { + "role": "user", + "content": [{"text": "What is the capital of France?"}] + } + ] + response = await service.converse(messages) + + # With system prompt and inference config + response = await service.converse( + messages, + system=[{"text": "You are a helpful assistant."}], + inference_config={ + "maxTokens": 1024, + "temperature": 0.7, + "topP": 0.9 + } + ) + + # With tool configuration + response = await service.converse( + messages, + tool_config={ + "tools": [ + { + "toolSpec": { + "name": "get_weather", + "description": "Get the weather for a location", + "inputSchema": { + "json": { + "type": "object", + "properties": { + "location": {"type": "string"} + }, + "required": ["location"] + } + } + } + } + ] + } + ) + ``` + """ + span = trace.get_current_span() + span.set_attribute("model", model) + span.set_attribute("uipath.custom_instrumentation", True) + + endpoint = EndpointManager.get_vendor_endpoint().format( + vendor="awsbedrock", model=model + ) + endpoint = Endpoint("/" + endpoint) + + request_body: dict[str, Any] = { + "messages": messages, + } + + if system: + request_body["system"] = system + if inference_config: + request_body["inferenceConfig"] = inference_config + if tool_config: + request_body["toolConfig"] = tool_config + if guardrail_config: + request_body["guardrailConfig"] = guardrail_config + if additional_model_request_fields: + request_body["additionalModelRequestFields"] = ( + additional_model_request_fields + ) + + headers = { + **DEFAULT_LLM_HEADERS, + "X-UiPath-LlmGateway-ApiFlavor": api_flavor.value, + } + + async with get_llm_semaphore(): + response = await self.request_async( + "POST", + endpoint, + json=request_body, + headers=headers, + ) + + return BedrockCompletion.model_validate(response.json()) diff --git a/src/uipath/platform/chat/llm_gateway.py b/src/uipath/platform/chat/llm_gateway.py index 0223bd4d3..741b8523f 100644 --- 
a/src/uipath/platform/chat/llm_gateway.py +++ b/src/uipath/platform/chat/llm_gateway.py @@ -126,3 +126,76 @@ class ChatCompletion(BaseModel): model: str choices: List[ChatCompletionChoice] usage: ChatCompletionUsage + + +class VertexPart(BaseModel): + """Model representing a part in a Vertex AI response.""" + + text: Optional[str] = None + + +class VertexContent(BaseModel): + """Model representing content in a Vertex AI response.""" + + role: str + parts: List[VertexPart] + + +class VertexCandidate(BaseModel): + """Model representing a candidate in a Vertex AI response.""" + + content: VertexContent + finishReason: Optional[str] = None + avgLogprobs: Optional[float] = None + + +class VertexUsageMetadata(BaseModel): + """Model representing usage metadata in a Vertex AI response.""" + + promptTokenCount: Optional[int] = None + candidatesTokenCount: Optional[int] = None + totalTokenCount: Optional[int] = None + + +class VertexCompletion(BaseModel): + """Model representing a Vertex AI (Gemini) completion response.""" + + candidates: List[VertexCandidate] + usageMetadata: Optional[VertexUsageMetadata] = None + modelVersion: Optional[str] = None + + +class BedrockContentBlock(BaseModel): + """Model representing a content block in a Bedrock response.""" + + text: Optional[str] = None + + +class BedrockMessage(BaseModel): + """Model representing a message in a Bedrock response.""" + + role: str + content: List[BedrockContentBlock] + + +class BedrockOutput(BaseModel): + """Model representing output in a Bedrock response.""" + + message: BedrockMessage + + +class BedrockUsage(BaseModel): + """Model representing usage statistics in a Bedrock response.""" + + inputTokens: Optional[int] = None + outputTokens: Optional[int] = None + totalTokens: Optional[int] = None + + +class BedrockCompletion(BaseModel): + """Model representing an AWS Bedrock completion response.""" + + output: BedrockOutput + stopReason: Optional[str] = None + usage: Optional[BedrockUsage] = None + metrics: 
Optional[Dict[str, Any]] = None diff --git a/testcases/apicalls-testcase/main.py b/testcases/apicalls-testcase/main.py index fe8812f9c..92bf4434d 100644 --- a/testcases/apicalls-testcase/main.py +++ b/testcases/apicalls-testcase/main.py @@ -6,7 +6,7 @@ from uipath.platform.action_center import TasksService from uipath.platform.connections import ConnectionsService from uipath.platform.context_grounding import ContextGroundingService -from uipath.platform.chat import ConversationsService +from uipath.platform.chat import ConversationsService, UiPathVertexService, UiPathBedrockService, GeminiModels, BedrockModels, APIFlavor from uipath.platform.documents import DocumentsService from uipath.platform.entities import EntitiesService from uipath.platform.resource_catalog import ResourceCatalogService @@ -36,6 +36,62 @@ async def test_llm(sdk: UiPath): "LLM Normalized Response: %s", result_normalized.choices[0].message.content ) + +async def test_llm_vertex(sdk: UiPath): + contents = [ + { + "role": "user", + "parts": [{"text": "What is the capital of France? Answer in one word."}] + } + ] + + result = await sdk.llm_vertex.generate_content( + contents, + model=GeminiModels.gemini_2_5_flash, + generation_config={ + "temperature": 0.7, + "maxOutputTokens": 100, + } + ) + logger.info("LLM Vertex Response: %s", result) + + +async def test_llm_bedrock(sdk: UiPath): + messages = [ + { + "role": "user", + "content": [{"text": "What is the capital of France? Answer in one word."}] + } + ] + + result_converse = await sdk.llm_bedrock.converse( + messages, + model=BedrockModels.anthropic_claude_haiku_4_5, + inference_config={ + "maxTokens": 100, + "temperature": 0.7, + } + ) + logger.info("LLM Bedrock Converse Response: %s", result_converse) + + messages_invoke = [ + { + "role": "user", + "content": [{"text": "What is the capital of Germany? 
Answer in one word."}] + } + ] + + result_invoke = await sdk.llm_bedrock.converse( + messages_invoke, + model=BedrockModels.anthropic_claude_haiku_4_5, + inference_config={ + "maxTokens": 100, + "temperature": 0.7, + }, + api_flavor=APIFlavor.AWS_BEDROCK_INVOKE, + ) + logger.info("LLM Bedrock Invoke Response: %s", result_invoke) + async def test_imports(sdk: UiPath): logger.info("BucketsService imported: %s", BucketsService) logger.info("QueuesService imported: %s", QueuesService) @@ -52,6 +108,10 @@ async def test_imports(sdk: UiPath): logger.info("ProcessesService imported: %s", ProcessesService) logger.info("ResourceCatalogService imported: %s", ResourceCatalogService) logger.info("TasksService imported: %s", TasksService) + logger.info("UiPathVertexService imported: %s", UiPathVertexService) + logger.info("UiPathBedrockService imported: %s", UiPathBedrockService) + logger.info("GeminiModels imported: %s", GeminiModels) + logger.info("BedrockModels imported: %s", BedrockModels) logger.info("Imports test passed.") @dataclass @@ -68,6 +128,8 @@ async def main(input: EchoIn) -> EchoOut: sdk = UiPath() await test_llm(sdk) + await test_llm_vertex(sdk) + await test_llm_bedrock(sdk) await test_imports(sdk) return EchoOut(message=input.message) diff --git a/uv.lock b/uv.lock index b3289b67b..88710ccf2 100644 --- a/uv.lock +++ b/uv.lock @@ -2491,7 +2491,7 @@ wheels = [ [[package]] name = "uipath" -version = "2.6.22" +version = "2.6.23" source = { editable = "." } dependencies = [ { name = "applicationinsights" },