---
nodes:
  # Determine a sampling temperature from the query type by asking a
  # lightweight model first, then feed that value into the generation step.
  temperature_selector:
    type: LanguageModelStep
    config:
      prompt_template: |
        Analyze this query and return a temperature value between 0 and 1.
        For factual queries, use 0.1. For creative queries, use 0.8.
        Query: {query}
        Return only the number.
      model_id: "vertex_ai/gemini-2.0-flash"
    input_mapping:
      # Quoted: the value contains '#', which would start a comment if a
      # space ever crept in before it.
      query: "__inputs__#query"

  generate:
    type: GenerationStep
    config_overrides:
      # NOTE(review): temperature_selector#response is the raw model text,
      # i.e. a string such as "0.1" — confirm the framework coerces
      # node-reference overrides to float before use, or add an explicit
      # cast step; temperature is conventionally numeric.
      temperature: "temperature_selector#response"
    input_mapping:
      # References a create_message_history node defined outside this chunk.
      message_history: "create_message_history#message_history"