{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Use this to design an AI agent as a POC\n", "\n", "## Initialization" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Read environment variables\n", "from dotenv import load_dotenv\n", "load_dotenv()  # Load the .env file" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Use Tongyi Qianwen (Qwen) as the LLM" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Use Tongyi Qianwen\n", "from langchain_community.llms import Tongyi\n", "\n", "import os\n", "\n", "# Initialize the Tongyi model (qwen-turbo in this example)\n", "llm_tongyi = Tongyi(\n", "    model_name=\"qwen-turbo\",\n", "    dashscope_api_key=os.getenv(\"DASHSCOPE_API_KEY\")\n", ")" ] },

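{ "cell_type": "markdown", "metadata": {}, "source": [ "Optional sanity check: a minimal sketch that confirms `DASHSCOPE_API_KEY` was loaded from `.env` and calls the model once before wiring it into a graph. The prompt text is only an example; skip this cell if you prefer." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional: confirm the key is visible (without printing it) and that the model responds\n", "print(\"DASHSCOPE_API_KEY loaded:\", os.getenv(\"DASHSCOPE_API_KEY\") is not None)\n", "print(llm_tongyi.invoke(\"Reply with a short greeting.\"))" ] },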
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Define the agent workflow" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from typing import Annotated\n", "\n", "from typing_extensions import TypedDict\n", "\n", "from langgraph.graph import StateGraph, START, END\n", "from langgraph.graph.message import add_messages\n", "\n", "class State(TypedDict):\n", "    # Messages have the type \"list\". The `add_messages` function\n", "    # in the annotation defines how this state key should be updated\n", "    # (in this case, it appends messages to the list, rather than overwriting them)\n", "    messages: Annotated[list, add_messages]\n", "\n", "graph_builder = StateGraph(State)" ] },

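{ "cell_type": "markdown", "metadata": {}, "source": [ "Optional illustration of the `add_messages` reducer used in `State`: calling it directly on two message lists should return them appended in order (it merges by message id instead of overwriting). This cell is not required for the graph itself." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustration only: add_messages appends new messages rather than replacing the list\n", "from langchain_core.messages import AIMessage, HumanMessage\n", "\n", "merged = add_messages([HumanMessage(content=\"hi\")], [AIMessage(content=\"hello\")])\n", "print(merged)  # expected: the HumanMessage followed by the AIMessage" ] },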
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Add the agent node" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def chatbot(state: State):\n", "    return {\"messages\": [llm_tongyi.invoke(state[\"messages\"])]}\n", "\n", "# The first argument is the unique node name\n", "# The second argument is the function or object that will be called whenever\n", "# the node is used.\n", "graph_builder.add_node(\"chatbot\", chatbot)" ] },

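{ "cell_type": "markdown", "metadata": {}, "source": [ "Optional: a node is just a function from the current state to a partial state update, so it can be tried on its own before the graph is compiled. A minimal sketch, assuming the Tongyi model above is reachable; the message content is only an example." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustration only: call the node function directly with a hand-built state\n", "# (assumes llm_tongyi works; not needed for the rest of the notebook)\n", "print(chatbot({\"messages\": [{\"role\": \"user\", \"content\": \"Hello!\"}]}))" ] },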
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Add the workflow start and end points" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Define the entry point\n", "graph_builder.add_edge(START, \"chatbot\")\n", "# Define the exit point\n", "graph_builder.add_edge(\"chatbot\", END)\n", "\n", "# Compile the graph\n", "graph = graph_builder.compile()" ] },

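{ "cell_type": "markdown", "metadata": {}, "source": [ "Optional: before the interactive loop at the end, the compiled graph can be exercised once with `invoke`. A minimal sketch; the question text is only an example." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustration only: run the graph once on a single user message\n", "result = graph.invoke({\"messages\": [{\"role\": \"user\", \"content\": \"What is LangGraph?\"}]})\n", "print(result[\"messages\"][-1])  # the reply appended by the chatbot node" ] },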
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Visualize the workflow" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from IPython.display import Image, display\n", "\n", "try:\n", "    display(Image(graph.get_graph().draw_mermaid_png()))\n", "except Exception:\n", "    # This requires some extra dependencies and is optional\n", "    pass" ] },

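{ "cell_type": "markdown", "metadata": {}, "source": [ "If PNG rendering fails in your environment, the Mermaid source of the graph can still be printed as plain text, which needs no extra dependencies." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Fallback: print the Mermaid definition of the graph as text\n", "print(graph.get_graph().draw_mermaid())" ] },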
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Run the workflow" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Streaming interaction function (adapted for the Tongyi model)\n", "def stream_graph_updates(user_input: str):\n", "    state = {\"messages\": [{\"role\": \"user\", \"content\": user_input}]}\n", "\n", "    for event in graph.stream(state):\n", "        for value in event.values():\n", "            last_message = value[\"messages\"][-1]\n", "            print(f\"Assistant: {last_message}\")\n", "\n", "while True:\n", "    try:\n", "        user_input = input(\"User: \")\n", "        print(\"User: \", user_input)\n", "        if user_input.lower() in [\"quit\", \"exit\", \"q\"]:\n", "            print(\"Goodbye!\")\n", "            break\n", "        stream_graph_updates(user_input)\n", "    except Exception:\n", "        # Fallback if input() is not available in this environment\n", "        user_input = \"What do you know about LangGraph?\"\n", "        print(\"User: \" + user_input)\n", "        stream_graph_updates(user_input)\n", "        break" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.3" } }, "nbformat": 4, "nbformat_minor": 2 }