Tutorial: switching between different thinking modes with CrewAI
llm_tracing.ipynb (new file, 592 lines added)
@@ -0,0 +1,592 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPUhTA5YLOC5sFoz0nC/16T",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/casualcomputer/llm_google_colab/blob/main/llm_tracing.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NFsmQ1KlioR0",
"outputId": "bb3fe022-a329-4feb-e6f3-d8f2577ffa2a"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/1.3 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"langchain-google-genai 2.1.5 requires google-ai-generativelanguage<0.7.0,>=0.6.18, but you have google-ai-generativelanguage 0.6.15 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0m"
]
}
],
"source": [
"# Install dependencies\n",
"!pip install --upgrade pip --quiet\n",
"!pip install langchain google-generativeai torch torchvision transformers datasets langchain_community --quiet"
]
},
{
"cell_type": "code",
"source": [
"# Imports & Authentication\n",
"import os\n",
"from datasets import load_dataset\n",
"import re\n",
"import pandas as pd\n",
"from google.colab import userdata\n",
"\n",
"api_key = userdata.get('GOOGLE_API_KEY')  # Use the name you gave your secret\n",
"\n",
"if api_key:\n",
"    # Use the api_key\n",
"    print(\"API key loaded successfully from Colab Secrets.\")\n",
"    # print(f\"Your API key is: {api_key}\")  # Be careful printing the key itself\n",
"else:\n",
"    print(\"API key 'GOOGLE_API_KEY' not found in Colab Secrets.\")"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "c1bumpHjls2s",
"outputId": "0f17bceb-64a6-4ca2-a4c4-8b22b6dc8234"
},
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"API key loaded successfully from Colab Secrets.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!pip install langchain_google_genai --quiet\n",
"import os\n",
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
"llm = ChatGoogleGenerativeAI(\n",
"    model=\"gemini-1.5-flash-latest\",  # You can also try \"gemini-pro\"\n",
"    temperature=0,\n",
"    google_api_key=api_key\n",
")\n",
"print(\"LLM Initialized successfully.\")\n",
"\n",
"print(\"\\nSending a test prompt to the LLM...\")\n",
"print(type(llm))\n",
"print(llm.invoke(\"Write me a ballad about LangChain\").content)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-USI0cHo26zz",
"outputId": "d8066617-9f53-4f18-87c7-bae224024957"
},
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"google-generativeai 0.8.5 requires google-ai-generativelanguage==0.6.15, but you have google-ai-generativelanguage 0.6.18 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0mLLM Initialized successfully.\n",
"\n",
"Sending a test prompt to the LLM...\n",
"<class 'langchain_google_genai.chat_models.ChatGoogleGenerativeAI'>\n",
"(Verse 1)\n",
"The data streams, a raging flood,\n",
"A chaos vast, misunderstood.\n",
"To tame its power, find its core,\n",
"A programmer sought, and something more.\n",
"He heard a whisper, on the breeze,\n",
"Of LangChain's promise, meant to please.\n",
"A framework new, a shining light,\n",
"To bring the darkness into sight.\n",
"\n",
"(Verse 2)\n",
"With LLMs vast, and prompts so keen,\n",
"He built a chain, a wondrous scene.\n",
"From prompt engineering's subtle art,\n",
"He crafted queries, played his part.\n",
"The chains he forged, both long and deep,\n",
"Where memories slept, and secrets keep.\n",
"He linked the models, one by one,\n",
"Until the task was nearly done.\n",
"\n",
"(Verse 3)\n",
"The agents danced, a graceful sway,\n",
"Through complex tasks, they found their way.\n",
"They searched the web, with tireless might,\n",
"And brought forth knowledge, pure and bright.\n",
"From simple questions, answers flowed,\n",
"A tapestry of facts bestowed.\n",
"The chatbot spoke, with human grace,\n",
"And wore a smile upon its face.\n",
"\n",
"(Verse 4)\n",
"But challenges arose, a thorny plight,\n",
"The chains would break, in darkest night.\n",
"The models faltered, lost their way,\n",
"And truth was buried, far away.\n",
"He toiled and struggled, day and night,\n",
"To mend the breaks, and set things right.\n",
"With careful tuning, fine-tuned art,\n",
"He strengthened chains, and played his part.\n",
"\n",
"(Verse 5)\n",
"At last, success, a joyful sound,\n",
"The data tamed, on solid ground.\n",
"The LangChain hummed, a gentle song,\n",
"Where knowledge flowed, both right and strong.\n",
"A testament to skill and grace,\n",
"A framework built, to find its place.\n",
"So raise a glass, to this new age,\n",
"Where LangChain guides, upon life's stage.\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"# Agent"
],
"metadata": {
"id": "oeiqmV6BaPVy"
}
},
{
"cell_type": "code",
"source": [
"!pip install crewai crewai_tools --quiet\n",
"\n",
"from crewai import Agent, Task, Crew, LLM\n",
"\n",
"# 1) Define a simple search tool (or replace with your real search API)\n",
"def search_tool(query: str) -> str:\n",
"    # Right now it’s a stub. In production you might call Wikipedia, SerpAPI, etc.\n",
"    return \"Ottawa (capital of Canada), population ~1 million\"\n",
"\n",
"# 2) Instantiate your CrewAI Agent with the Search tool registered\n",
"agent_react = Agent(\n",
"    role=\"Factual Researcher\",\n",
"    goal=\"Retrieve factual snippets for user queries\",\n",
"    backstory=\"Always think step-by-step and use external tools when needed.\",\n",
"    verbose=True,  # prints Thought/Action/Observation logs\n",
"    allow_delegation=False,\n",
"    llm=llm\n",
"    # CrewAI expects tool objects rather than plain dicts. To register the stub,\n",
"    # wrap it with the @tool decorator (crewai.tools in recent versions,\n",
"    # crewai_tools in older ones) and pass e.g. tools=[search]\n",
")\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "_wzMmyp-c1Ih",
"outputId": "b32174d0-182f-4543-bc25-caca26747154"
},
"execution_count": 13,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"\n",
"\n"
]
}
]
},
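{
"cell_type": "markdown",
"source": [
"`Task` and `Crew` are imported above but not yet used. The next cell is a minimal, hypothetical sketch of wiring `agent_react` into a one-task crew; the names `research_task` and `crew` are illustrative, not part of the original notebook."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Hypothetical sketch (not in the original notebook): run the agent end-to-end\n",
"# via a one-task crew. `expected_output` is required by recent CrewAI versions.\n",
"research_task = Task(\n",
"    description=\"Answer factually: What is the capital of Canada and its population?\",\n",
"    expected_output=\"A one-sentence factual answer.\",\n",
"    agent=agent_react\n",
")\n",
"\n",
"crew = Crew(agents=[agent_react], tasks=[research_task])\n",
"print(crew.kickoff())"
],
"metadata": {},
"execution_count": null,
"outputs": []
},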
{
"cell_type": "code",
"source": [
"# 3. Define all the prompt templates\n",
"ZERO_SHOT_TEMPLATE = \"You are a helpful assistant. Answer concisely:\\n\\n{query}\"\n",
"FEW_SHOT_TEMPLATE = (\n",
"    \"You are a helpful assistant. Use the examples below to guide your answer.\\n\\n\"\n",
"    \"Example 1:\\nQ: Who wrote 'Pride and Prejudice'?\\nA: Jane Austen\\n\\n\"\n",
"    \"Example 2:\\nQ: What is the boiling point of water in Celsius?\\nA: 100°C\\n\\n\"\n",
"    \"Now answer:\\nQ: {query}\\nA:\"\n",
")\n",
"COT_TEMPLATE = (\n",
"    \"You are a reasoning assistant. Think step by step:\\n\\n\"\n",
"    \"Question: {query}\\n\"\n",
"    \"Answer: Let’s think step by step.\"\n",
")\n",
"COT_SC_TEMPLATE = (\n",
"    \"You are a reasoning assistant. Think carefully, step by step:\\n\\n\"\n",
"    \"Question: {query}\\n\"\n",
"    \"Answer:\"\n",
")\n",
"REACT_TEMPLATE = (\n",
"    \"You are a multi-agent reasoning assistant. Follow the ReAct format exactly:\\n\\n\"\n",
"    \"Thought: I need to gather facts for: {query}\\n\"\n",
"    \"Action: Search[\\\"{query}\\\"]\\n\"\n",
"    \"Observation:\\n\"\n",
"    \"Thought: Now I have the necessary information.\\n\"\n",
"    \"Answer:\"\n",
")\n",
"RAG_TEMPLATE = (\n",
"    \"You have been provided the following context passages:\\n\\n{retrieved_docs}\\n\\n\"\n",
"    \"Using only the above context, answer:\\n\\n{query}\"\n",
")\n",
"CHAIN_STEP1 = (\n",
"    \"Extract key facts needed to answer:\\n\\n\"\n",
"    \"Question: {query}\\n\"\n",
"    \"Facts:\"\n",
")\n",
"CHAIN_STEP2 = (\n",
"    \"You have these facts:\\n\\n{facts}\\n\\n\"\n",
"    \"Now answer:\\n\\n\"\n",
"    \"Question: {query}\\n\"\n",
"    \"Answer:\"\n",
")\n",
"META_PROMPT = (\n",
"    \"You are a prompt engineer. Given the user’s instruction, produce an optimized prompt:\\n\\n\"\n",
"    \"User instruction: {user_instruction}\\n\\n\"\n",
"    \"Optimized prompt:\"\n",
")\n",
"PAL_TEMPLATE = (\n",
"    \"You are a Python interpreter. Write Python code that solves:\\n\\n\"\n",
"    \"{query}\\n\\n\"\n",
"    \"# After writing the code, execute it mentally and provide the result.\"\n",
")\n",
"\n",
"# 4. Build a dispatcher function to select the right template\n",
"def build_prompt(query: str, mode: str, **kwargs) -> str:\n",
"    if mode == \"zero_shot\":\n",
"        return ZERO_SHOT_TEMPLATE.format(query=query)\n",
"    elif mode == \"few_shot\":\n",
"        return FEW_SHOT_TEMPLATE.format(query=query)\n",
"    elif mode == \"cot\":\n",
"        return COT_TEMPLATE.format(query=query)\n",
"    elif mode == \"cot_sc\":\n",
"        return COT_SC_TEMPLATE.format(query=query)\n",
"    elif mode == \"react\":\n",
"        return REACT_TEMPLATE.format(query=query)\n",
"    elif mode == \"rag\":\n",
"        return RAG_TEMPLATE.format(retrieved_docs=kwargs.get(\"retrieved_docs\", \"\"), query=query)\n",
"    elif mode == \"chain_step1\":\n",
"        return CHAIN_STEP1.format(query=query)\n",
"    elif mode == \"chain_step2\":\n",
"        return CHAIN_STEP2.format(query=query, facts=kwargs.get(\"facts\", \"\"))\n",
"    elif mode == \"meta\":\n",
"        return META_PROMPT.format(user_instruction=query)\n",
"    elif mode == \"pal\":\n",
"        return PAL_TEMPLATE.format(query=query)\n",
"    else:\n",
"        raise ValueError(f\"Unknown mode: {mode}\")\n",
"\n",
"# 5. Helpers to normalise responses and call the LLM\n",
"from langchain_core.messages import BaseMessage  # for the isinstance check\n",
"\n",
"def _to_text(resp) -> str:\n",
"    \"\"\"Normalise CrewAI/Gemini responses to a clean string.\n",
"\n",
"    Newer versions return a plain str; older LangChain chat models return\n",
"    an AIMessage (a BaseMessage subclass), so both cases are handled.\n",
"    \"\"\"\n",
"    if isinstance(resp, str):\n",
"        return resp\n",
"    if isinstance(resp, BaseMessage):\n",
"        return resp.content\n",
"    return str(resp)  # fallback: best-effort string conversion\n",
"\n",
"def smart_llm_call(model, prompt):\n",
"    \"\"\"\n",
"    Call the LLM using whichever entry-point the installed\n",
"    CrewAI / LangChain version supports.\n",
"    \"\"\"\n",
"    for m in (\"invoke\", \"predict\", \"__call__\"):\n",
"        if hasattr(model, m):\n",
"            resp = getattr(model, m)(prompt)  # call the method\n",
"            return _to_text(resp)\n",
"    raise AttributeError(\"LLM exposes no invoke/predict/__call__\")\n",
"\n",
"# ------------------ MAIN DISPATCHER ------------------\n",
"def call_with_mode(query: str, mode: str, **kwargs) -> str:\n",
"    prompt = build_prompt(query, mode, **kwargs)\n",
"    if mode == \"react\":  # needs the Search tool\n",
"        return smart_llm_call(agent_react.llm, prompt).strip()\n",
"    else:  # every other prompting style\n",
"        return smart_llm_call(llm, prompt).strip()\n",
"\n",
"# 6. Example Calls\n",
"query_example = \"What is the capital of Canada and its population?\"\n",
"\n",
"# 6A. Zero-Shot\n",
"res_zero = call_with_mode(query_example, mode=\"zero_shot\")\n",
"print(\"Zero-Shot:\", res_zero)\n",
"print(\"...................\")\n",
"\n",
"# 6B. Few-Shot\n",
"res_few = call_with_mode(query_example, mode=\"few_shot\")\n",
"print(\"Few-Shot:\", res_few)\n",
"print(\"...................\")\n",
"\n",
"# 6C. Chain-of-Thought (CoT)\n",
"res_cot = call_with_mode(query_example, mode=\"cot\")\n",
"print(\"CoT:\", res_cot)\n",
"print(\"...................\")\n",
"\n",
"# 6D. Self-Consistency (CoT-SC) (voting over 3 runs)\n",
"cot_sc_chains = [call_with_mode(query_example, mode=\"cot_sc\") for _ in range(3)]\n",
"finals = [chain.split(\"\\n\")[-1].strip() for chain in cot_sc_chains if \"Answer:\" in chain]\n",
"from collections import Counter\n",
"res_cot_sc = Counter(finals).most_common(1)[0][0] if finals else \"\"\n",
"print(\"CoT-SC Vote:\", res_cot_sc)\n",
"print(\"...................\")\n",
"\n",
"# # 6E. ReAct (tool call)\n",
"# res_react = call_with_mode(query_example, mode=\"react\")\n",
"# print(\"ReAct:\", res_react)\n",
"\n",
"# 6G. Prompt Chaining (Two-Step)\n",
"facts = call_with_mode(query_example, mode=\"chain_step1\")\n",
"print(\"Chained Facts:\", facts)\n",
"res_chain_final = call_with_mode(query_example, mode=\"chain_step2\", facts=facts)\n",
"print(\"Chained Final:\", res_chain_final)\n",
"print(\"...................\")\n",
"\n",
"# 6H. Meta Prompting\n",
"opt_prompt = call_with_mode(\"Optimize a prompt for: \" + query_example, mode=\"meta\")\n",
"print(\"Meta Optimized Prompt:\", opt_prompt)\n",
"print(\"...................\")\n",
"\n",
"# 6I. Program-Aided Language Model (PAL)\n",
"res_pal = call_with_mode(query_example, mode=\"pal\")\n",
"print(\"PAL:\", res_pal)\n",
"print(\"...................\")\n",
"\n",
"# # 6F. Retrieval-Augmented Generation (RAG)\n",
"# # Assume retrieve_docs is implemented elsewhere\n",
"# def retrieve_docs(q: str, k: int = 3) -> str:\n",
"#     # E.g., use a FAISS vector store or Wikipedia API\n",
"#     return \"Ottawa is the capital of Canada. The population is ~1 million.\"\n",
"\n",
"# docs = retrieve_docs(query_example, k=3)\n",
"# res_rag = call_with_mode(query_example, mode=\"rag\", retrieved_docs=docs)\n",
"# print(\"RAG:\", res_rag)  # see the runnable sketch in the next cell"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9GOl2_KBHrBf",
"outputId": "9c677341-22c6-4b2b-b455-d8cefa219b17"
},
"execution_count": 25,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Zero-Shot: Ottawa; approximately 1 million.\n",
"...................\n",
"Few-Shot: The capital of Canada is Ottawa. Its population is approximately 1 million.\n",
"...................\n",
"CoT: Let’s think step by step.\n",
"\n",
"Step 1: Identify the question. The question asks for two pieces of information: the capital city of Canada and its population.\n",
"\n",
"Step 2: Determine the capital city. The capital city of Canada is Ottawa.\n",
"\n",
"Step 3: Determine the population. The population of Ottawa is not a fixed number as it changes constantly. To answer accurately, we need to specify a time frame (e.g., the population as of a specific year or a recent estimate). A quick online search would provide the most up-to-date information.\n",
"\n",
"Step 4: Combine the information. Once a reliable population figure for Ottawa is found (e.g., from Statistics Canada or a reputable news source), the complete answer can be formulated as: \"The capital of Canada is Ottawa. [Insert population figure here] is its approximate population as of [Insert date here].\"\n",
"...................\n",
"CoT-SC Vote: Answer: The capital of Canada is Ottawa. Its population is approximately 1 million people (this is an approximation and requires a source and year for accuracy).\n",
"...................\n",
"Chained Facts: To answer the question, you need these facts:\n",
"\n",
"* **Capital of Canada:** [Insert City Name Here]\n",
"* **Population of the capital city:** [Insert Population Number Here]\n",
"Chained Final: To answer the question, please provide the missing information: the capital city of Canada and its population.\n",
"...................\n",
"Meta Optimized Prompt: What is the capital city of Canada and what is its current population?\n",
"...................\n",
"PAL: I can't access real-time information, including databases or the internet, to get the current population of Ottawa. Therefore, I'll provide code that would work if I *had* access to such data, and then give a reasonable approximation based on my knowledge.\n",
"\n",
"```python\n",
"import requests # This would be needed for real-world data fetching\n",
"\n",
"def get_capital_and_population(country):\n",
" \"\"\"\n",
" Fetches the capital and population of a given country. This is a simplified example and \n",
" would require a robust error handling mechanism in a real-world application.\n",
" \"\"\"\n",
" try:\n",
" # In a real application, you'd use a reliable API like REST Countries API\n",
" # This is a placeholder. Replace with actual API call.\n",
" response = requests.get(f\"https://example.com/api/country/{country}\") # Placeholder API\n",
" data = response.json()\n",
" capital = data[\"capital\"]\n",
" population = data[\"population\"]\n",
" return capital, population\n",
" except requests.exceptions.RequestException as e:\n",
" return None, f\"Error fetching data: {e}\"\n",
" except KeyError as e:\n",
" return None, f\"Data format error: {e}\"\n",
"\n",
"\n",
"country = \"Canada\"\n",
"capital, population = get_capital_and_population(country)\n",
"\n",
"if capital:\n",
" print(f\"The capital of {country} is {capital}.\")\n",
" print(f\"Its population is approximately {population}.\") # Note: approximation\n",
"else:\n",
" print(population) # Print the error message\n",
"\n",
"```\n",
"\n",
"**Mental Execution and Result (Approximation):**\n",
"\n",
"The code above uses a placeholder API call. In a real execution, it would make a network request to an API providing country information. Since I cannot do that, I'll provide a result based on my knowledge:\n",
"\n",
"\n",
"```\n",
"The capital of Canada is Ottawa.\n",
"Its population is approximately 1000000. (Note: This is a rough estimate. The actual population fluctuates.)\n",
"```\n",
"\n",
"The actual population of Ottawa would need to be looked up from a reliable source like Statistics Canada. My estimate is significantly less precise than what a real API call would provide.\n",
"...................\n"
]
}
]
},
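{
"cell_type": "markdown",
"source": [
"A minimal, hypothetical sketch for the commented-out Section 6F above: it stubs `retrieve_docs` with the same hard-coded passage so the `rag` mode can be exercised end-to-end. In practice, swap the stub for a real retriever (e.g. a FAISS vector store or the Wikipedia API)."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# 6F. Retrieval-Augmented Generation (RAG): illustrative stub, not a real retriever\n",
"def retrieve_docs(q: str, k: int = 3) -> str:\n",
"    # Placeholder: return a fixed context passage instead of querying an index.\n",
"    return \"Ottawa is the capital of Canada. The population is ~1 million.\"\n",
"\n",
"docs = retrieve_docs(query_example, k=3)\n",
"res_rag = call_with_mode(query_example, mode=\"rag\", retrieved_docs=docs)\n",
"print(\"RAG:\", res_rag)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},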
{
"cell_type": "code",
"source": [
"query = \"What is the capital of Australia and its population?\"\n",
"for mode in [\"zero_shot\", \"few_shot\", \"cot\", \"cot_sc\"]:\n",
"    print(f\"{mode:<8} →\", call_with_mode(query, mode))"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "KNx1KBs-ieev",
"outputId": "7ea0ac6a-f892-48b6-901c-b46bf0aa0fab"
},
"execution_count": 28,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"zero_shot → Canberra; approximately 430,000\n",
"few_shot → A: The capital of Australia is Canberra. Its population is approximately 430,000.\n",
"cot → Let’s think step by step.\n",
"\n",
"Step 1: Identify the country in question. The question asks about the capital of Australia.\n",
"\n",
"Step 2: Determine the capital city. The capital city of Australia is Canberra.\n",
"\n",
"Step 3: Find the population of the capital city. A quick online search reveals that the population of Canberra is approximately 430,000 (this number fluctuates and depends on the source and year).\n",
"\n",
"Step 4: Combine the findings. Therefore, the capital of Australia is Canberra, and its population is approximately 430,000.\n",
"cot_sc → Step 1: Identify the question. The question asks for two pieces of information: the capital city of Australia and its population.\n",
"\n",
"Step 2: Determine the capital city. The capital city of Australia is Canberra.\n",
"\n",
"Step 3: Find the population. A quick online search reveals that the population of Canberra fluctuates, but a reasonable approximation is around 450,000 people. (Note: This number is approximate and changes constantly).\n",
"\n",
"Step 4: Combine the information.\n",
"\n",
"Answer: The capital of Australia is Canberra, with a population of approximately 450,000.\n"
]
}
]
}
]
}