Comenzar · Empieza gratis

Building the Message and Calling the LLM

With your get_context_from_mcp(user_query) helper function created to return the resource text and prompt text, it's time to pass that information to the LLM!

The currency server, get_context_from_mcp(), get_tools_from_mcp(), call_mcp_tool(), and the OpenAI client are set up in the background. You need to complete the function that builds the system content, calls the model, and handles either a direct message or a tool call. You've been provided with an ambiguous and an unambiguous user input to see if your MCP prompts make a difference!

Este ejercicio forma parte del curso

Introduction to Model Context Protocol (MCP)

Ver curso

Instrucciones del ejercicio

  • On line 37, build full_prompt by concatenating prompt_text, the string "\n\nSupported currencies:\n", and resource_text.
  • On line 44, send full_prompt and the openai_tools list to the model.
  • On lines 53-55, if the output type is "message", return str(output.content[0].text).
  • On lines 58-60, if the output type is "function_call", pass the .name attribute of output and the arguments returned by json.loads(output.arguments) to call_mcp_tool().

Ejercicio interactivo práctico

Prueba este ejercicio y completa el código de muestra.

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def get_context_from_mcp(user_query: str) -> tuple[str, str]:
    """Fetch context from the MCP currency server.

    Returns a (resource_text, prompt_text) pair: the supported-currencies
    resource and the server-side prompt template rendered with *user_query*.
    """
    server = StdioServerParameters(command=sys.executable, args=["currency_server.py"])
    async with stdio_client(server) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            # Static resource: the list of currencies the server supports.
            resource = await session.read_resource("file://currencies.txt")
            # Prompt template rendered server-side with the user's request.
            prompt = await session.get_prompt(
                "convert_currency_prompt",
                arguments={"currency_request": user_query},
            )
            return resource.contents[0].text, prompt.messages[0].content.text

async def get_tools_from_mcp():
    """Return the list of tools advertised by the MCP currency server."""
    server = StdioServerParameters(command=sys.executable, args=["currency_server.py"])
    async with stdio_client(server) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            listing = await session.list_tools()
            return listing.tools

async def call_mcp_tool(tool_name: str, arguments: dict) -> str:
    """Invoke *tool_name* on the MCP currency server and return its text output."""
    server = StdioServerParameters(command=sys.executable, args=["currency_server.py"])
    async with stdio_client(server) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            outcome = await session.call_tool(tool_name, arguments)
            return str(outcome.content[0].text)

async def call_llm_with_context(user_query: str):
    """Call the LLM with resource and prompt context from MCP.

    Builds a system prompt from the MCP prompt template plus the
    supported-currencies resource, sends it to the model together with the
    server's tools, and then either returns the model's direct text reply
    or executes the requested tool call and returns the follow-up answer.
    Returns None if the model produces neither a message nor a usable
    function call.
    """
    resource_text, prompt_text = await get_context_from_mcp(user_query)

    # Combine the rendered prompt with the supported-currencies resource
    full_prompt = prompt_text + "\n\nSupported currencies:\n" + resource_text

    client = AsyncOpenAI(api_key="")
    mcp_tools = await get_tools_from_mcp()
    # Translate MCP tool definitions into the OpenAI Responses tool schema
    openai_tools = [{"type": "function", "name": t.name, "description": t.description or "", "parameters": t.inputSchema} for t in mcp_tools]

    # Send full_prompt and the tools list to the model
    response = await client.responses.create(
        model="gpt-4o-mini",
        input=full_prompt,
        tools=openai_tools,
    )

    output = response.output[0]

    # Direct text answer: return it as-is
    if output.type == "message":
        print(f"\nAssistant: {output.content[0].text}")
        return str(output.content[0].text)

    # Tool call requested: run it via MCP, then ask the model for a final answer
    if output.type == "function_call":
        # The model serializes tool arguments as a JSON string
        args = json.loads(output.arguments)
        result = await call_mcp_tool(output.name, args)
        followup = await client.responses.create(
            model="gpt-4o-mini",
            input=[
                {"role": "user", "content": user_query},
                output,
                {"type": "function_call_output", "call_id": output.call_id, "output": result},
            ],
        )
        if followup.output and followup.output[0].type == "message":
            print(f"\nAssistant: {followup.output[0].content[0].text}")
            return str(followup.output[0].content[0].text)

# Exercise both paths: a vague request (the prompt should trigger a
# clarification) and a precise one (the model should call the tool).
demo_queries = [
    ("=== Ambiguous request (prompt asks for clarification) ===", "Convert some euros to dollars"),
    ("\n=== Unambiguous request (model calls tool) ===", "How much is 50 GBP in euros?"),
]
for header, query in demo_queries:
    print(header)
    asyncio.run(call_llm_with_context(query))
Editar y ejecutar código