Mirror of https://github.com/mendableai/firecrawl.git (synced 2024-11-16 19:58:08 +08:00)
278 lines | 540 KiB | Plaintext
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Visualizing Website Topics (Claude + Firecrawl + E2B)\n",
    "\n",
    "**Powered by [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet), [Firecrawl](https://www.firecrawl.dev/), and [Code Interpreter SDK](https://github.com/e2b-dev/code-interpreter) by [E2B](https://e2b.dev/docs)**\n",
    "\n",
    "Scrape a website with Firecrawl and then plot the most common topics using Claude and Code Interpreter\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install e2b_code_interpreter anthropic firecrawl-py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import datetime\n",
    "import time\n",
    "from firecrawl import FirecrawlApp\n",
    "import json\n",
    "\n",
    "# TODO: Get your Anthropic API key from https://anthropic.com\n",
    "anthropic_api_key = \"your-anthropic-api-key\"\n",
    "# TODO: Get your Firecrawl API key from https://www.firecrawl.dev\n",
    "firecrawl_api_key = \"your-firecrawl-api-key\"\n",
    "# TODO: Get your E2B API key from https://e2b.dev/docs\n",
    "e2b_api_key = \"your-e2b-api-key\""
   ]
  },
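  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Below is a minimal, assumption-based sketch of the crawling step, reusing the `FirecrawlApp` import and `firecrawl_api_key` from the cell above. The exact parameters accepted by `crawl_url`, and the shape of its return value, differ between `firecrawl-py` versions, and the URL is only an illustrative example chosen to match the LangChain docs content that appears in the output further down.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch (assumed shape): crawl a site with Firecrawl and collect page content.\n",
    "app = FirecrawlApp(api_key=firecrawl_api_key)\n",
    "\n",
    "# The URL is an example; the 'params' layout may vary across firecrawl-py versions.\n",
    "crawl_result = app.crawl_url(\n",
    "    \"https://python.langchain.com/v0.2/docs/additional_resources/arxiv_references/\",\n",
    "    params={\"crawlerOptions\": {\"limit\": 5}},\n",
    ")\n",
    "\n",
    "# Assuming a list of page dicts is returned (as in the output below), each page's\n",
    "# 'content' field holds the scraped text.\n",
    "print(crawl_result[:1])"
   ]
  },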
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
"[{'content': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\narXiv\\n=====\\n\\nLangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks.\\n\\nFrom the opposite direction, scientists use LangChain in research and reference LangChain in the research papers. Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header)\\n.\\n\\nSummary[\\u200b](#summary \"Direct link to Summary\")\\n\\n----------------------------------------------\\n\\n| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation |\\n| --- | --- | --- | --- |\\n| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) |\\n| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) |\\n| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) |\\n| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. 
| 2024-01-08 | `Cookbook:` [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) |\\n| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) |\\n| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) |\\n| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) |\\n| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)<br>, `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) |\\n| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) |\\n| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)<br>, `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) |\\n| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)<br>, `Cookbook:` [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) |\\n| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) |\\n| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. 
| 2023-04-17 | `Cookbook:` [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)<br>, [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) |\\n| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)<br>, [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) |\\n| `2303.17760v2` [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) |\\n| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)<br>, `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) |\\n| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) |\\n| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. 
| 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)<br>, `Template:` [hyde](https://python.langchain.com/docs/templates/hyde)<br>, `Cookbook:` [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) |\\n| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) |\\n| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) |\\n| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)<br>, [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)<br>, `Cookbook:` [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) |\\n| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)<br>, [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)<br>, [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)<br>, `API:` [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)<br>, [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) |\\n| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. 
| 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) |\\n| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) |\\n| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)<br>, [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) |\\n| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) |\\n| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. 
| 2019-09-11 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) |\\n\\nSelf-Discover: Large Language Models Self-Compose Reasoning Structures[\\u200b](#self-discover-large-language-models-self-compose-reasoning-structures \"Direct link to Self-Discover: Large Language Models Self-Compose Reasoning Structures\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2402.03620v1\\n \\n* **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures\\n \\n* **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.\\n \\n* **Published Date:** 2024-02-06\\n \\n* **URL:** [http://arxiv.org/abs/2402.03620v1](http://arxiv.org/abs/2402.03620v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)\\n \\n\\n**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2\\'s performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x fewer inference compute. 
Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.\\n\\nRAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval[\\u200b](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval \"Direct link to RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.18059v1\\n \\n* **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\\n \\n* **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.\\n \\n* **Published Date:** 2024-01-31\\n \\n* **URL:** [http://arxiv.org/abs/2401.18059v1](http://arxiv.org/abs/2401.18059v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)\\n \\n\\n**Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy.\\n\\nCorrective Retrieval Augmented Generation[\\u200b](#corrective-retrieval-augmented-generation \"Direct link to Corrective Retrieval Augmented Generation\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.15884v2\\n \\n* **Title:** Corrective Retrieval Augmented Generation\\n \\n* **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.\\n \\n* **Published Date:** 2024-01-29\\n \\n* **URL:** [http://arxiv.org/abs/2401.15884v2](http://arxiv.org/abs/2401.15884v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. 
Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.\\n\\nMixtral of Experts[\\u200b](#mixtral-of-experts \"Direct link to Mixtral of Experts\")\\n\\n-------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.04088v1\\n \\n* **Title:** Mixtral of Experts\\n \\n* **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.\\n \\n* **Published Date:** 2024-01-08\\n \\n* **URL:** [http://arxiv.org/abs/2401.04088v1](http://arxiv.org/abs/2401.04088v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)\\n \\n\\n**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.\\n\\nDense X Retrieval: What Retrieval Granularity Should We Use?[\\u200b](#dense-x-retrieval-what-retrieval-granularity-should-we-use \"Direct link to Dense X Retrieval: What Retrieval Granularity Should We Use?\")\\n\\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2312.06648v2\\n \\n* **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?\\n \\n* **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.\\n \\n* **Published Date:** 2023-12-11\\n \\n* **URL:** [http://arxiv.org/abs/2312.06648v2](http://arxiv.org/abs/2312.06648v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)\\n \\n\\n**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. 
When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularity. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.\\n\\nChain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models[\\u200b](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models \"Direct link to Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2311.09210v1\\n \\n* **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\\n \\n* **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.\\n \\n* **Published Date:** 2023-11-15\\n \\n* **URL:** [http://arxiv.org/abs/2311.09210v1](http://arxiv.org/abs/2311.09210v1)\\n \\n* **LangChain:**\\n \\n * **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)\\n \\n\\n**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, and potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with \"unknown\" when the answer is unattainable. In response to these challenges, we introduces Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently trained on an LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. 
Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.\\n\\nSelf-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection[\\u200b](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection \"Direct link to Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.11511v1\\n \\n* **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\\n \\n* **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.\\n \\n* **Published Date:** 2023-10-17\\n \\n* **URL:** [http://arxiv.org/abs/2310.11511v1](http://arxiv.org/abs/2310.11511v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)\\n \\n\\n**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM\\'s quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. 
Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models.\\n\\nTake a Step Back: Evoking Reasoning via Abstraction in Large Language Models[\\u200b](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models \"Direct link to Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.06117v2\\n \\n* **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\\n \\n* **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.\\n \\n* **Published Date:** 2023-10-09\\n \\n* **URL:** [http://arxiv.org/abs/2310.06117v2](http://arxiv.org/abs/2310.06117v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)\\n \\n * **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)\\n \\n\\n**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.\\n\\nLlama 2: Open Foundation and Fine-Tuned Chat Models[\\u200b](#llama-2-open-foundation-and-fine-tuned-chat-models \"Direct link to Llama 2: Open Foundation and Fine-Tuned Chat Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2307.09288v2\\n \\n* **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models\\n \\n* **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.\\n \\n* **Published Date:** 2023-07-18\\n \\n* **URL:** [http://arxiv.org/abs/2307.09288v2](http://arxiv.org/abs/2307.09288v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)\\n \\n\\n**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.\\n\\nQuery Rewriting for Retrieval-Augmented Large Language Models[\\u200b](#query-rewriting-for-retrieval-augmented-large-language-models \"Direct link to Query Rewriting for Retrieval-Augmented Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.14283v3\\n \\n* **Title:** Query Rewriting for Retrieval-Augmented Large Language Models\\n \\n* **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.\\n \\n* **Published Date:** 2023-05-23\\n \\n* **URL:** [http://arxiv.org/abs/2305.14283v3](http://arxiv.org/abs/2305.14283v3)\\n \\n* **LangChain:**\\n \\n * **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)\\n \\n * **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)\\n \\n\\n**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of the previous retrieve-then-read for the retrieval-augmented LLMs from the perspective of the query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. 
Experiments results show consistent performance improvement, indicating that our framework is proven effective and scalable, and brings a new framework for retrieval-augmented LLM.\\n\\nLarge Language Model Guided Tree-of-Thought[\\u200b](#large-language-model-guided-tree-of-thought \"Direct link to Large Language Model Guided Tree-of-Thought\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.08291v1\\n \\n* **Title:** Large Language Model Guided Tree-of-Thought\\n \\n* **Authors:** Jieyi Long\\n \\n* **Published Date:** 2023-05-15\\n \\n* **URL:** [http://arxiv.org/abs/2305.08291v1](http://arxiv.org/abs/2305.08291v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)\\n \\n * **Cookbook:** [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)\\n \\n\\n**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind\\'s approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. 
Our implementation of the ToT-based Sudoku solver is available on GitHub: \\\\\\\\url{[https://github.com/jieyilong/tree-of-thought-puzzle-solver}](https://github.com/jieyilong/tree-of-thought-puzzle-solver%7D)\\n.\\n\\nPlan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models[\\u200b](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models \"Direct link to Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.04091v3\\n \\n* **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\\n \\n* **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.\\n \\n* **Published Date:** 2023-05-06\\n \\n* **URL:** [http://arxiv.org/abs/2305.04091v3](http://arxiv.org/abs/2305.04091v3)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with \"Let\\'s think step by step\" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. 
The code can be found at [https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting](https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting)\\n.\\n\\nVisual Instruction Tuning[\\u200b](#visual-instruction-tuning \"Direct link to Visual Instruction Tuning\")\\n\\n----------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.08485v2\\n \\n* **Title:** Visual Instruction Tuning\\n \\n* **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.\\n \\n* **Published Date:** 2023-04-17\\n \\n* **URL:** [http://arxiv.org/abs/2304.08485v2](http://arxiv.org/abs/2304.08485v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)\\n , [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)\\n \\n\\n**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.\\n\\nGenerative Agents: Interactive Simulacra of Human Behavior[\\u200b](#generative-agents-interactive-simulacra-of-human-behavior \"Direct link to Generative Agents: Interactive Simulacra of Human Behavior\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.03442v2\\n \\n* **Title:** Generative Agents: Interactive Simulacra of Human Behavior\\n \\n* **Authors:** Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al.\\n \\n* **Published Date:** 2023-04-07\\n \\n* **URL:** [http://arxiv.org/abs/2304.03442v2](http://arxiv.org/abs/2304.03442v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)\\n , [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)\\n \\n\\n**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. 
In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent\\'s experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine\\'s Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.\\n\\nCAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society[\\u200b](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society \"Direct link to CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17760v2\\n \\n* **Title:** CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\\n \\n* **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.\\n \\n* **Published Date:** 2023-03-31\\n \\n* **URL:** [http://arxiv.org/abs/2303.17760v2](http://arxiv.org/abs/2303.17760v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)\\n \\n\\n**Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. 
We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel)\\n.\\n\\nHuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face[\\u200b](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face \"Direct link to HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17580v4\\n \\n* **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\\n \\n* **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.\\n \\n* **Published Date:** 2023-03-30\\n \\n* **URL:** [http://arxiv.org/abs/2303.17580v4](http://arxiv.org/abs/2303.17580v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)\\n \\n * **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)\\n \\n\\n**Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. 
By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.\\n\\nGPT-4 Technical Report[\\u200b](#gpt-4-technical-report \"Direct link to GPT-4 Technical Report\")\\n\\n-------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.08774v6\\n \\n* **Title:** GPT-4 Technical Report\\n \\n* **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.\\n \\n* **Published Date:** 2023-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2303.08774v6](http://arxiv.org/abs/2303.08774v6)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)\\n \\n\\n**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4\\'s performance based on models trained with no more than 1/1,000th the compute of GPT-4.\\n\\nA Watermark for Large Language Models[\\u200b](#a-watermark-for-large-language-models \"Direct link to A Watermark for Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2301.10226v4\\n \\n* **Title:** A Watermark for Large Language Models\\n \\n* **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.\\n \\n* **Published Date:** 2023-01-24\\n \\n* **URL:** [http://arxiv.org/abs/2301.10226v4](http://arxiv.org/abs/2301.10226v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)\\n , 
[langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of \"green\" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.\\n\\nPrecise Zero-Shot Dense Retrieval without Relevance Labels[\\u200b](#precise-zero-shot-dense-retrieval-without-relevance-labels \"Direct link to Precise Zero-Shot Dense Retrieval without Relevance Labels\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.10496v1\\n \\n* **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels\\n \\n* **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.\\n \\n* **Published Date:** 2022-12-20\\n \\n* **URL:** [http://arxiv.org/abs/2212.10496v1](http://arxiv.org/abs/2212.10496v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)\\n \\n * **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)\\n \\n * **Cookbook:** [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)\\n \\n\\n**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder~(e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step ground the generated document to the actual corpus, with the encoder\\'s dense bottleneck filtering out the incorrect details. 
Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages~(e.g. sw, ko, ja).\\n\\nRobust and Explainable Identification of Logical Fallacies in Natural Language Arguments[\\u200b](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments \"Direct link to Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.07425v3\\n \\n* **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\\n \\n* **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.\\n \\n* **Published Date:** 2022-12-12\\n \\n* **URL:** [http://arxiv.org/abs/2212.07425v3](http://arxiv.org/abs/2212.07425v3)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)\\n \\n\\n**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. 
We share our open-source code and data on GitHub to support further work on logical fallacy identification.\\n\\nComplementary Explanations for Effective In-Context Learning[\\u200b](#complementary-explanations-for-effective-in-context-learning \"Direct link to Complementary Explanations for Effective In-Context Learning\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.13892v2\\n \\n* **Title:** Complementary Explanations for Effective In-Context Learning\\n \\n* **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.\\n \\n* **Published Date:** 2022-11-25\\n \\n* **URL:** [http://arxiv.org/abs/2211.13892v2](http://arxiv.org/abs/2211.13892v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)\\n \\n\\n**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. 
Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.\\n\\nPAL: Program-aided Language Models[\\u200b](#pal-program-aided-language-models \"Direct link to PAL: Program-aided Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.10435v2\\n \\n* **Title:** PAL: Program-aided Language Models\\n \\n* **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.\\n \\n* **Published Date:** 2022-11-18\\n \\n* **URL:** [http://arxiv.org/abs/2211.10435v2](http://arxiv.org/abs/2211.10435v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)\\n , [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)\\n \\n * **Cookbook:** [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time (\"few-shot prompting\"). Much of this success can be attributed to prompting methods such as \"chain-of-thought\\'\\', which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. 
Our code and data are publicly available at [http://reasonwithpal.com/](http://reasonwithpal.com/)\\n .\\n\\nReAct: Synergizing Reasoning and Acting in Language Models[\\u200b](#react-synergizing-reasoning-and-acting-in-language-models \"Direct link to ReAct: Synergizing Reasoning and Acting in Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2210.03629v3\\n \\n* **Title:** ReAct: Synergizing Reasoning and Acting in Language Models\\n \\n* **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.\\n \\n* **Published Date:** 2022-10-06\\n \\n* **URL:** [http://arxiv.org/abs/2210.03629v3](http://arxiv.org/abs/2210.03629v3)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)\\n , [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)\\n , [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)\\n \\n * **API Reference:** [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)\\n , [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)\\n \\n\\n**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
Project site with code: [https://react-lm.github.io](https://react-lm.github.io)\\n\\nDeep Lake: a Lakehouse for Deep Learning[\\u200b](#deep-lake-a-lakehouse-for-deep-learning \"Direct link to Deep Lake: a Lakehouse for Deep Learning\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2209.10785v2\\n \\n* **Title:** Deep Lake: a Lakehouse for Deep Learning\\n \\n* **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.\\n \\n* **Published Date:** 2022-09-22\\n \\n* **URL:** [http://arxiv.org/abs/2209.10785v2](http://arxiv.org/abs/2209.10785v2)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)\\n \\n\\n**Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.\\n\\nBitext Mining Using Distilled Sentence Representations for Low-Resource Languages[\\u200b](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages \"Direct link to Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2205.12654v1\\n \\n* **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\\n \\n* **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk\\n \\n* **Published Date:** 2022-05-25\\n \\n* **URL:** [http://arxiv.org/abs/2205.12654v1](http://arxiv.org/abs/2205.12654v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)\\n \\n\\n**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. 
A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting. Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems.\\n\\nEvaluating the Text-to-SQL Capabilities of Large Language Models[\\u200b](#evaluating-the-text-to-sql-capabilities-of-large-language-models \"Direct link to Evaluating the Text-to-SQL Capabilities of Large Language Models\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2204.00498v1\\n \\n* **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models\\n \\n* **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau\\n \\n* **Published Date:** 2022-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2204.00498v1](http://arxiv.org/abs/2204.00498v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)\\n , [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)\\n \\n\\n**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. 
Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples.\\n\\nLocally Typical Sampling[\\u200b](#locally-typical-sampling \"Direct link to Locally Typical Sampling\")\\n\\n-------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2202.00666v5\\n \\n* **Title:** Locally Typical Sampling\\n \\n* **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.\\n \\n* **Published Date:** 2022-02-01\\n \\n* **URL:** [http://arxiv.org/abs/2202.00666v5](http://arxiv.org/abs/2202.00666v5)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Today\\'s probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. 
Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions.\\n\\nLearning Transferable Visual Models From Natural Language Supervision[\\u200b](#learning-transferable-visual-models-from-natural-language-supervision \"Direct link to Learning Transferable Visual Models From Natural Language Supervision\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2103.00020v1\\n \\n* **Title:** Learning Transferable Visual Models From Natural Language Supervision\\n \\n* **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.\\n \\n* **Published Date:** 2021-02-26\\n \\n* **URL:** [http://arxiv.org/abs/2103.00020v1](http://arxiv.org/abs/2103.00020v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)\\n \\n\\n**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at [https://github.com/OpenAI/CLIP](https://github.com/OpenAI/CLIP)\\n.\\n\\nCTRL: A Conditional Transformer Language Model for Controllable Generation[\\u200b](#ctrl-a-conditional-transformer-language-model-for-controllable-generation \"Direct link to CTRL: A Conditional Transformer Language Model for Controllable Generation\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1909.05858v2\\n \\n* **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation\\n \\n* **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. 
Varshney, et al.\\n \\n* **Published Date:** 2019-09-11\\n \\n* **URL:** [http://arxiv.org/abs/1909.05858v2](http://arxiv.org/abs/1909.05858v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at [https://github.com/salesforce/ctrl](https://github.com/salesforce/ctrl)\\n.\\n\\nSentence-BERT: Sentence Embeddings using Siamese BERT-Networks[\\u200b](#sentence-bert-sentence-embeddings-using-siamese-bert-networks \"Direct link to Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1908.10084v1\\n \\n* **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\\n \\n* **Authors:** Nils Reimers, Iryna Gurevych\\n \\n* **Published Date:** 2019-08-27\\n \\n* **URL:** [http://arxiv.org/abs/1908.10084v1](http://arxiv.org/abs/1908.10084v1)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)\\n \\n\\n**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/arxiv_references.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Summary](#summary)\\n \\n* [Self-Discover: Large Language Models Self-Compose Reasoning Structures](#self-discover-large-language-models-self-compose-reasoning-structures)\\n \\n* [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval)\\n \\n* [Corrective Retrieval Augmented Generation](#corrective-retrieval-augmented-generation)\\n \\n* [Mixtral of Experts](#mixtral-of-experts)\\n \\n* [Dense X Retrieval: What Retrieval Granularity Should We Use?](#dense-x-retrieval-what-retrieval-granularity-should-we-use)\\n \\n* [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models)\\n \\n* [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection)\\n \\n* [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models)\\n \\n* [Llama 2: Open Foundation and Fine-Tuned Chat Models](#llama-2-open-foundation-and-fine-tuned-chat-models)\\n \\n* [Query Rewriting for Retrieval-Augmented Large Language Models](#query-rewriting-for-retrieval-augmented-large-language-models)\\n \\n* [Large Language Model Guided Tree-of-Thought](#large-language-model-guided-tree-of-thought)\\n \\n* [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models)\\n \\n* [Visual Instruction Tuning](#visual-instruction-tuning)\\n \\n* [Generative Agents: Interactive Simulacra of Human Behavior](#generative-agents-interactive-simulacra-of-human-behavior)\\n \\n* [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society)\\n \\n* [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face)\\n \\n* [GPT-4 Technical Report](#gpt-4-technical-report)\\n \\n* [A Watermark for Large Language Models](#a-watermark-for-large-language-models)\\n \\n* [Precise Zero-Shot Dense Retrieval without Relevance Labels](#precise-zero-shot-dense-retrieval-without-relevance-labels)\\n \\n* [Robust and Explainable Identification of Logical 
Fallacies in Natural Language Arguments](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments)\\n \\n* [Complementary Explanations for Effective In-Context Learning](#complementary-explanations-for-effective-in-context-learning)\\n \\n* [PAL: Program-aided Language Models](#pal-program-aided-language-models)\\n \\n* [ReAct: Synergizing Reasoning and Acting in Language Models](#react-synergizing-reasoning-and-acting-in-language-models)\\n \\n* [Deep Lake: a Lakehouse for Deep Learning](#deep-lake-a-lakehouse-for-deep-learning)\\n \\n* [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages)\\n \\n* [Evaluating the Text-to-SQL Capabilities of Large Language Models](#evaluating-the-text-to-sql-capabilities-of-large-language-models)\\n \\n* [Locally Typical Sampling](#locally-typical-sampling)\\n \\n* [Learning Transferable Visual Models From Natural Language Supervision](#learning-transferable-visual-models-from-natural-language-supervision)\\n \\n* [CTRL: A Conditional Transformer Language Model for Controllable Generation](#ctrl-a-conditional-transformer-language-model-for-controllable-generation)\\n \\n* [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](#sentence-bert-sentence-embeddings-using-siamese-bert-networks)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. 
You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\narXiv\\n=====\\n\\nLangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks.\\n\\nFrom the opposite direction, scientists use LangChain in research and reference LangChain in the research papers. Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header)\\n.\\n\\nSummary[\\u200b](#summary \"Direct link to Summary\")\\n\\n----------------------------------------------\\n\\n| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation |\\n| --- | --- | --- | --- |\\n| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) |\\n| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) |\\n| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) |\\n| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) |\\n| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. 
| 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) |\\n| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) |\\n| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) |\\n| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)<br>, `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) |\\n| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) |\\n| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)<br>, `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) |\\n| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)<br>, `Cookbook:` [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) |\\n| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) |\\n| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)<br>, [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) |\\n| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al. 
| 2023-04-07 | `Cookbook:` [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)<br>, [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) |\\n| `2303.17760v2` [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) |\\n| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)<br>, `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) |\\n| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) |\\n| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)<br>, `Template:` [hyde](https://python.langchain.com/docs/templates/hyde)<br>, `Cookbook:` [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) |\\n| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. 
| 2022-12-12 | `API:` [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) |\\n| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) |\\n| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)<br>, [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)<br>, `Cookbook:` [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) |\\n| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)<br>, [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)<br>, [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)<br>, `API:` [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)<br>, [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) |\\n| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. 
| 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) |\\n| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) |\\n| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)<br>, [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) |\\n| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) |\\n| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. 
| 2019-09-11 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) |\\n\\nSelf-Discover: Large Language Models Self-Compose Reasoning Structures[\\u200b](#self-discover-large-language-models-self-compose-reasoning-structures \"Direct link to Self-Discover: Large Language Models Self-Compose Reasoning Structures\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2402.03620v1\\n \\n* **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures\\n \\n* **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.\\n \\n* **Published Date:** 2024-02-06\\n \\n* **URL:** [http://arxiv.org/abs/2402.03620v1](http://arxiv.org/abs/2402.03620v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)\\n \\n\\n**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2\\'s performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x fewer inference compute. 
Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.\\n\\nRAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval[\\u200b](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval \"Direct link to RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.18059v1\\n \\n* **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\\n \\n* **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.\\n \\n* **Published Date:** 2024-01-31\\n \\n* **URL:** [http://arxiv.org/abs/2401.18059v1](http://arxiv.org/abs/2401.18059v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)\\n \\n\\n**Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy.\\n\\nCorrective Retrieval Augmented Generation[\\u200b](#corrective-retrieval-augmented-generation \"Direct link to Corrective Retrieval Augmented Generation\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.15884v2\\n \\n* **Title:** Corrective Retrieval Augmented Generation\\n \\n* **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.\\n \\n* **Published Date:** 2024-01-29\\n \\n* **URL:** [http://arxiv.org/abs/2401.15884v2](http://arxiv.org/abs/2401.15884v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. 
Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.\\n\\nMixtral of Experts[\\u200b](#mixtral-of-experts \"Direct link to Mixtral of Experts\")\\n\\n-------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.04088v1\\n \\n* **Title:** Mixtral of Experts\\n \\n* **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.\\n \\n* **Published Date:** 2024-01-08\\n \\n* **URL:** [http://arxiv.org/abs/2401.04088v1](http://arxiv.org/abs/2401.04088v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)\\n \\n\\n**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.\\n\\nDense X Retrieval: What Retrieval Granularity Should We Use?[\\u200b](#dense-x-retrieval-what-retrieval-granularity-should-we-use \"Direct link to Dense X Retrieval: What Retrieval Granularity Should We Use?\")\\n\\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2312.06648v2\\n \\n* **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?\\n \\n* **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.\\n \\n* **Published Date:** 2023-12-11\\n \\n* **URL:** [http://arxiv.org/abs/2312.06648v2](http://arxiv.org/abs/2312.06648v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)\\n \\n\\n**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. 
When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularity. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.\\n\\nChain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models[\\u200b](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models \"Direct link to Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2311.09210v1\\n \\n* **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\\n \\n* **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.\\n \\n* **Published Date:** 2023-11-15\\n \\n* **URL:** [http://arxiv.org/abs/2311.09210v1](http://arxiv.org/abs/2311.09210v1)\\n \\n* **LangChain:**\\n \\n * **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)\\n \\n\\n**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, and potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with \"unknown\" when the answer is unattainable. In response to these challenges, we introduces Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently trained on an LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. 
Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.\\n\\nSelf-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection[\\u200b](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection \"Direct link to Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.11511v1\\n \\n* **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\\n \\n* **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.\\n \\n* **Published Date:** 2023-10-17\\n \\n* **URL:** [http://arxiv.org/abs/2310.11511v1](http://arxiv.org/abs/2310.11511v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)\\n \\n\\n**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM\\'s quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. 
Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models.\\n\\nTake a Step Back: Evoking Reasoning via Abstraction in Large Language Models[\\u200b](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models \"Direct link to Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.06117v2\\n \\n* **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\\n \\n* **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.\\n \\n* **Published Date:** 2023-10-09\\n \\n* **URL:** [http://arxiv.org/abs/2310.06117v2](http://arxiv.org/abs/2310.06117v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)\\n \\n * **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)\\n \\n\\n**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.\\n\\nLlama 2: Open Foundation and Fine-Tuned Chat Models[\\u200b](#llama-2-open-foundation-and-fine-tuned-chat-models \"Direct link to Llama 2: Open Foundation and Fine-Tuned Chat Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2307.09288v2\\n \\n* **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models\\n \\n* **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.\\n \\n* **Published Date:** 2023-07-18\\n \\n* **URL:** [http://arxiv.org/abs/2307.09288v2](http://arxiv.org/abs/2307.09288v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)\\n \\n\\n**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.\\n\\nQuery Rewriting for Retrieval-Augmented Large Language Models[\\u200b](#query-rewriting-for-retrieval-augmented-large-language-models \"Direct link to Query Rewriting for Retrieval-Augmented Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.14283v3\\n \\n* **Title:** Query Rewriting for Retrieval-Augmented Large Language Models\\n \\n* **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.\\n \\n* **Published Date:** 2023-05-23\\n \\n* **URL:** [http://arxiv.org/abs/2305.14283v3](http://arxiv.org/abs/2305.14283v3)\\n \\n* **LangChain:**\\n \\n * **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)\\n \\n * **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)\\n \\n\\n**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of the previous retrieve-then-read for the retrieval-augmented LLMs from the perspective of the query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. 
Experiments results show consistent performance improvement, indicating that our framework is proven effective and scalable, and brings a new framework for retrieval-augmented LLM.\\n\\nLarge Language Model Guided Tree-of-Thought[\\u200b](#large-language-model-guided-tree-of-thought \"Direct link to Large Language Model Guided Tree-of-Thought\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.08291v1\\n \\n* **Title:** Large Language Model Guided Tree-of-Thought\\n \\n* **Authors:** Jieyi Long\\n \\n* **Published Date:** 2023-05-15\\n \\n* **URL:** [http://arxiv.org/abs/2305.08291v1](http://arxiv.org/abs/2305.08291v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)\\n \\n * **Cookbook:** [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)\\n \\n\\n**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind\\'s approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. 
Our implementation of the ToT-based Sudoku solver is available on GitHub: \\\\\\\\url{[https://github.com/jieyilong/tree-of-thought-puzzle-solver}](https://github.com/jieyilong/tree-of-thought-puzzle-solver%7D)\\n.\\n\\nPlan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models[\\u200b](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models \"Direct link to Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.04091v3\\n \\n* **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\\n \\n* **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.\\n \\n* **Published Date:** 2023-05-06\\n \\n* **URL:** [http://arxiv.org/abs/2305.04091v3](http://arxiv.org/abs/2305.04091v3)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with \"Let\\'s think step by step\" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. 
The code can be found at [https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting](https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting)\\n.\\n\\nVisual Instruction Tuning[\\u200b](#visual-instruction-tuning \"Direct link to Visual Instruction Tuning\")\\n\\n----------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.08485v2\\n \\n* **Title:** Visual Instruction Tuning\\n \\n* **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.\\n \\n* **Published Date:** 2023-04-17\\n \\n* **URL:** [http://arxiv.org/abs/2304.08485v2](http://arxiv.org/abs/2304.08485v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)\\n , [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)\\n \\n\\n**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.\\n\\nGenerative Agents: Interactive Simulacra of Human Behavior[\\u200b](#generative-agents-interactive-simulacra-of-human-behavior \"Direct link to Generative Agents: Interactive Simulacra of Human Behavior\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.03442v2\\n \\n* **Title:** Generative Agents: Interactive Simulacra of Human Behavior\\n \\n* **Authors:** Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al.\\n \\n* **Published Date:** 2023-04-07\\n \\n* **URL:** [http://arxiv.org/abs/2304.03442v2](http://arxiv.org/abs/2304.03442v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)\\n , [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)\\n \\n\\n**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. 
In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent\\'s experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine\\'s Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.\\n\\nCAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society[\\u200b](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society \"Direct link to CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17760v2\\n \\n* **Title:** CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\\n \\n* **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.\\n \\n* **Published Date:** 2023-03-31\\n \\n* **URL:** [http://arxiv.org/abs/2303.17760v2](http://arxiv.org/abs/2303.17760v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)\\n \\n\\n**Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. 
We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel)\\n.\\n\\nHuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face[\\u200b](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face \"Direct link to HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17580v4\\n \\n* **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\\n \\n* **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.\\n \\n* **Published Date:** 2023-03-30\\n \\n* **URL:** [http://arxiv.org/abs/2303.17580v4](http://arxiv.org/abs/2303.17580v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)\\n \\n * **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)\\n \\n\\n**Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. 
By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.\\n\\nGPT-4 Technical Report[\\u200b](#gpt-4-technical-report \"Direct link to GPT-4 Technical Report\")\\n\\n-------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.08774v6\\n \\n* **Title:** GPT-4 Technical Report\\n \\n* **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.\\n \\n* **Published Date:** 2023-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2303.08774v6](http://arxiv.org/abs/2303.08774v6)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)\\n \\n\\n**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4\\'s performance based on models trained with no more than 1/1,000th the compute of GPT-4.\\n\\nA Watermark for Large Language Models[\\u200b](#a-watermark-for-large-language-models \"Direct link to A Watermark for Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2301.10226v4\\n \\n* **Title:** A Watermark for Large Language Models\\n \\n* **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.\\n \\n* **Published Date:** 2023-01-24\\n \\n* **URL:** [http://arxiv.org/abs/2301.10226v4](http://arxiv.org/abs/2301.10226v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)\\n , 
[langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of \"green\" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.\\n\\nPrecise Zero-Shot Dense Retrieval without Relevance Labels[\\u200b](#precise-zero-shot-dense-retrieval-without-relevance-labels \"Direct link to Precise Zero-Shot Dense Retrieval without Relevance Labels\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.10496v1\\n \\n* **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels\\n \\n* **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.\\n \\n* **Published Date:** 2022-12-20\\n \\n* **URL:** [http://arxiv.org/abs/2212.10496v1](http://arxiv.org/abs/2212.10496v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)\\n \\n * **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)\\n \\n * **Cookbook:** [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)\\n \\n\\n**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder~(e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step ground the generated document to the actual corpus, with the encoder\\'s dense bottleneck filtering out the incorrect details. 
Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages~(e.g. sw, ko, ja).\\n\\nRobust and Explainable Identification of Logical Fallacies in Natural Language Arguments[\\u200b](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments \"Direct link to Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.07425v3\\n \\n* **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\\n \\n* **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.\\n \\n* **Published Date:** 2022-12-12\\n \\n* **URL:** [http://arxiv.org/abs/2212.07425v3](http://arxiv.org/abs/2212.07425v3)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)\\n \\n\\n**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. 
We share our open-source code and data on GitHub to support further work on logical fallacy identification.\\n\\nComplementary Explanations for Effective In-Context Learning[\\u200b](#complementary-explanations-for-effective-in-context-learning \"Direct link to Complementary Explanations for Effective In-Context Learning\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.13892v2\\n \\n* **Title:** Complementary Explanations for Effective In-Context Learning\\n \\n* **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.\\n \\n* **Published Date:** 2022-11-25\\n \\n* **URL:** [http://arxiv.org/abs/2211.13892v2](http://arxiv.org/abs/2211.13892v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)\\n \\n\\n**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. 
Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.\\n\\nPAL: Program-aided Language Models[\\u200b](#pal-program-aided-language-models \"Direct link to PAL: Program-aided Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.10435v2\\n \\n* **Title:** PAL: Program-aided Language Models\\n \\n* **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.\\n \\n* **Published Date:** 2022-11-18\\n \\n* **URL:** [http://arxiv.org/abs/2211.10435v2](http://arxiv.org/abs/2211.10435v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)\\n , [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)\\n \\n * **Cookbook:** [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time (\"few-shot prompting\"). Much of this success can be attributed to prompting methods such as \"chain-of-thought\\'\\', which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. 
Our code and data are publicly available at [http://reasonwithpal.com/](http://reasonwithpal.com/)\\n .\\n\\nReAct: Synergizing Reasoning and Acting in Language Models[\\u200b](#react-synergizing-reasoning-and-acting-in-language-models \"Direct link to ReAct: Synergizing Reasoning and Acting in Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2210.03629v3\\n \\n* **Title:** ReAct: Synergizing Reasoning and Acting in Language Models\\n \\n* **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.\\n \\n* **Published Date:** 2022-10-06\\n \\n* **URL:** [http://arxiv.org/abs/2210.03629v3](http://arxiv.org/abs/2210.03629v3)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)\\n , [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)\\n , [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)\\n \\n * **API Reference:** [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)\\n , [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)\\n \\n\\n**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
Project site with code: [https://react-lm.github.io](https://react-lm.github.io)\\n\\nDeep Lake: a Lakehouse for Deep Learning[\\u200b](#deep-lake-a-lakehouse-for-deep-learning \"Direct link to Deep Lake: a Lakehouse for Deep Learning\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2209.10785v2\\n \\n* **Title:** Deep Lake: a Lakehouse for Deep Learning\\n \\n* **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.\\n \\n* **Published Date:** 2022-09-22\\n \\n* **URL:** [http://arxiv.org/abs/2209.10785v2](http://arxiv.org/abs/2209.10785v2)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)\\n \\n\\n**Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.\\n\\nBitext Mining Using Distilled Sentence Representations for Low-Resource Languages[\\u200b](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages \"Direct link to Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2205.12654v1\\n \\n* **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\\n \\n* **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk\\n \\n* **Published Date:** 2022-05-25\\n \\n* **URL:** [http://arxiv.org/abs/2205.12654v1](http://arxiv.org/abs/2205.12654v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)\\n \\n\\n**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. 
A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting. Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems.\\n\\nEvaluating the Text-to-SQL Capabilities of Large Language Models[\\u200b](#evaluating-the-text-to-sql-capabilities-of-large-language-models \"Direct link to Evaluating the Text-to-SQL Capabilities of Large Language Models\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2204.00498v1\\n \\n* **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models\\n \\n* **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau\\n \\n* **Published Date:** 2022-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2204.00498v1](http://arxiv.org/abs/2204.00498v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)\\n , [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)\\n \\n\\n**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. 
Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples.\\n\\nLocally Typical Sampling[\\u200b](#locally-typical-sampling \"Direct link to Locally Typical Sampling\")\\n\\n-------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2202.00666v5\\n \\n* **Title:** Locally Typical Sampling\\n \\n* **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.\\n \\n* **Published Date:** 2022-02-01\\n \\n* **URL:** [http://arxiv.org/abs/2202.00666v5](http://arxiv.org/abs/2202.00666v5)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Today\\'s probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. 
Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions.\\n\\nLearning Transferable Visual Models From Natural Language Supervision[\\u200b](#learning-transferable-visual-models-from-natural-language-supervision \"Direct link to Learning Transferable Visual Models From Natural Language Supervision\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2103.00020v1\\n \\n* **Title:** Learning Transferable Visual Models From Natural Language Supervision\\n \\n* **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.\\n \\n* **Published Date:** 2021-02-26\\n \\n* **URL:** [http://arxiv.org/abs/2103.00020v1](http://arxiv.org/abs/2103.00020v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)\\n \\n\\n**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at [https://github.com/OpenAI/CLIP](https://github.com/OpenAI/CLIP)\\n.\\n\\nCTRL: A Conditional Transformer Language Model for Controllable Generation[\\u200b](#ctrl-a-conditional-transformer-language-model-for-controllable-generation \"Direct link to CTRL: A Conditional Transformer Language Model for Controllable Generation\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1909.05858v2\\n \\n* **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation\\n \\n* **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. 
Varshney, et al.\\n \\n* **Published Date:** 2019-09-11\\n \\n* **URL:** [http://arxiv.org/abs/1909.05858v2](http://arxiv.org/abs/1909.05858v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at [https://github.com/salesforce/ctrl](https://github.com/salesforce/ctrl)\\n.\\n\\nSentence-BERT: Sentence Embeddings using Siamese BERT-Networks[\\u200b](#sentence-bert-sentence-embeddings-using-siamese-bert-networks \"Direct link to Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1908.10084v1\\n \\n* **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\\n \\n* **Authors:** Nils Reimers, Iryna Gurevych\\n \\n* **Published Date:** 2019-08-27\\n \\n* **URL:** [http://arxiv.org/abs/1908.10084v1](http://arxiv.org/abs/1908.10084v1)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)\\n \\n\\n**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/arxiv_references.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Summary](#summary)\\n \\n* [Self-Discover: Large Language Models Self-Compose Reasoning Structures](#self-discover-large-language-models-self-compose-reasoning-structures)\\n \\n* [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval)\\n \\n* [Corrective Retrieval Augmented Generation](#corrective-retrieval-augmented-generation)\\n \\n* [Mixtral of Experts](#mixtral-of-experts)\\n \\n* [Dense X Retrieval: What Retrieval Granularity Should We Use?](#dense-x-retrieval-what-retrieval-granularity-should-we-use)\\n \\n* [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models)\\n \\n* [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection)\\n \\n* [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models)\\n \\n* [Llama 2: Open Foundation and Fine-Tuned Chat Models](#llama-2-open-foundation-and-fine-tuned-chat-models)\\n \\n* [Query Rewriting for Retrieval-Augmented Large Language Models](#query-rewriting-for-retrieval-augmented-large-language-models)\\n \\n* [Large Language Model Guided Tree-of-Thought](#large-language-model-guided-tree-of-thought)\\n \\n* [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models)\\n \\n* [Visual Instruction Tuning](#visual-instruction-tuning)\\n \\n* [Generative Agents: Interactive Simulacra of Human Behavior](#generative-agents-interactive-simulacra-of-human-behavior)\\n \\n* [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society)\\n \\n* [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face)\\n \\n* [GPT-4 Technical Report](#gpt-4-technical-report)\\n \\n* [A Watermark for Large Language Models](#a-watermark-for-large-language-models)\\n \\n* [Precise Zero-Shot Dense Retrieval without Relevance Labels](#precise-zero-shot-dense-retrieval-without-relevance-labels)\\n \\n* [Robust and Explainable Identification of Logical 
Fallacies in Natural Language Arguments](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments)\\n \\n* [Complementary Explanations for Effective In-Context Learning](#complementary-explanations-for-effective-in-context-learning)\\n \\n* [PAL: Program-aided Language Models](#pal-program-aided-language-models)\\n \\n* [ReAct: Synergizing Reasoning and Acting in Language Models](#react-synergizing-reasoning-and-acting-in-language-models)\\n \\n* [Deep Lake: a Lakehouse for Deep Learning](#deep-lake-a-lakehouse-for-deep-learning)\\n \\n* [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages)\\n \\n* [Evaluating the Text-to-SQL Capabilities of Large Language Models](#evaluating-the-text-to-sql-capabilities-of-large-language-models)\\n \\n* [Locally Typical Sampling](#locally-typical-sampling)\\n \\n* [Learning Transferable Visual Models From Natural Language Supervision](#learning-transferable-visual-models-from-natural-language-supervision)\\n \\n* [CTRL: A Conditional Transformer Language Model for Controllable Generation](#ctrl-a-conditional-transformer-language-model-for-controllable-generation)\\n \\n* [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](#sentence-bert-sentence-embeddings-using-siamese-bert-networks)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/arxiv_references/', 'pageStatusCode': 200}}, {'content': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. 
You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nDependents\\n==========\\n\\nDependents stats for `langchain-ai/langchain`\\n\\n[![](https://img.shields.io/static/v1?label=Used%20by&message=41717&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)\\n [![](https://img.shields.io/static/v1?label=Used%20by%20(public)](https://github.com/langchain-ai/langchain/network/dependents)\\n [![](https://img.shields.io/static/v1?label=Used%20by%20(private)](https://github.com/langchain-ai/langchain/network/dependents)\\n\\n\\\\[update: `2023-12-08`; only dependent repositories with Stars > 100\\\\]\\n\\n| Repository | Stars |\\n| --- | --- |\\n| [AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 46514 |\\n| [imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 44439 |\\n| [LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 35906 |\\n| [hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 35528 |\\n| [moymix/TaskMatrix](https://github.com/moymix/TaskMatrix) | 34342 |\\n| [geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 31126 |\\n| [streamlit/streamlit](https://github.com/streamlit/streamlit) | 28911 |\\n| [reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 27833 |\\n| [StanGirard/quivr](https://github.com/StanGirard/quivr) | 26032 |\\n| [OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 24946 |\\n| [run-llama/llama\\\\_index](https://github.com/run-llama/llama_index) | 24859 |\\n| [jmorganca/ollama](https://github.com/jmorganca/ollama) | 20849 |\\n| [openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 20249 |\\n| [chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 19305 |\\n| [mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 19172 |\\n| [PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 17528 |\\n| [cube-js/cube](https://github.com/cube-js/cube) | 16575 |\\n| [mlflow/mlflow](https://github.com/mlflow/mlflow) | 16000 |\\n| 
[mudler/LocalAI](https://github.com/mudler/LocalAI) | 14067 |\\n| [logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 13679 |\\n| [GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 13648 |\\n| [arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 13423 |\\n| [openai/evals](https://github.com/openai/evals) | 12649 |\\n| [airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 12460 |\\n| [langgenius/dify](https://github.com/langgenius/dify) | 11859 |\\n| [databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10672 |\\n| [AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9437 |\\n| [langchain-ai/langchainjs](https://github.com/langchain-ai/langchainjs) | 9227 |\\n| [gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 9203 |\\n| [aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples) | 9079 |\\n| [h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 8945 |\\n| [PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 7550 |\\n| [bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 6957 |\\n| [THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6801 |\\n| [microsoft/promptflow](https://github.com/microsoft/promptflow) | 6776 |\\n| [cpacker/MemGPT](https://github.com/cpacker/MemGPT) | 6642 |\\n| [joshpxyne/gpt-migrate](https://github.com/joshpxyne/gpt-migrate) | 6482 |\\n| [zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 6037 |\\n| [embedchain/embedchain](https://github.com/embedchain/embedchain) | 6023 |\\n| [mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 6019 |\\n| [assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 5936 |\\n| [sweepai/sweep](https://github.com/sweepai/sweep) | 5855 |\\n| [wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5766 |\\n| [zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 5710 |\\n| [pdm-project/pdm](https://github.com/pdm-project/pdm) | 5665 |\\n| [GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 5568 |\\n| [gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 5507 |\\n| [Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 5501 |\\n| [facebookresearch/llama-recipes](https://github.com/facebookresearch/llama-recipes) | 5477 |\\n| [serge-chat/serge](https://github.com/serge-chat/serge) | 5221 |\\n| [run-llama/rags](https://github.com/run-llama/rags) | 4916 |\\n| [openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4870 |\\n| [danswer-ai/danswer](https://github.com/danswer-ai/danswer) | 4774 |\\n| [langchain-ai/opengpts](https://github.com/langchain-ai/opengpts) | 4709 |\\n| [postgresml/postgresml](https://github.com/postgresml/postgresml) | 4639 |\\n| [MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 4582 |\\n| [intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4581 |\\n| [yihong0618/xiaogpt](https://github.com/yihong0618/xiaogpt) | 4359 |\\n| [RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 4357 |\\n| [Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 4317 |\\n| [madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4289 |\\n| [apache/nifi](https://github.com/apache/nifi) | 4098 |\\n| [langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 4091 |\\n| [aiwaves-cn/agents](https://github.com/aiwaves-cn/agents) | 4073 |\\n| 
[krishnaik06/The-Grand-Complete-Data-Science-Materials](https://github.com/krishnaik06/The-Grand-Complete-Data-Science-Materials) | 4065 |\\n| [khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 4016 |\\n| [Azure/azure-sdk-for-python](https://github.com/Azure/azure-sdk-for-python) | 3941 |\\n| [PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3915 |\\n| [OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 3799 |\\n| [marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3771 |\\n| [kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3688 |\\n| [Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 3543 |\\n| [llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3515 |\\n| [shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 3425 |\\n| [openchatai/OpenCopilot](https://github.com/openchatai/OpenCopilot) | 3418 |\\n| [josStorer/RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | 3297 |\\n| [whitead/paper-qa](https://github.com/whitead/paper-qa) | 3280 |\\n| [homanp/superagent](https://github.com/homanp/superagent) | 3258 |\\n| [ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 3199 |\\n| [OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 3099 |\\n| [project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 3090 |\\n| [OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2989 |\\n| [xlang-ai/OpenAgents](https://github.com/xlang-ai/OpenAgents) | 2825 |\\n| [dataelement/bisheng](https://github.com/dataelement/bisheng) | 2797 |\\n| [Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2784 |\\n| [OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2734 |\\n| [run-llama/llama-hub](https://github.com/run-llama/llama-hub) | 2721 |\\n| [SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2647 |\\n| [NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 2637 |\\n| [X-D-Lab/LangChain-ChatGLM-Webui](https://github.com/X-D-Lab/LangChain-ChatGLM-Webui) | 2532 |\\n| [GerevAI/gerev](https://github.com/GerevAI/gerev) | 2517 |\\n| [keephq/keep](https://github.com/keephq/keep) | 2448 |\\n| [yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2397 |\\n| [OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2324 |\\n| [IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2241 |\\n| [YiVal/YiVal](https://github.com/YiVal/YiVal) | 2232 |\\n| [jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 2189 |\\n| [Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 2136 |\\n| [microsoft/TaskWeaver](https://github.com/microsoft/TaskWeaver) | 2126 |\\n| [hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 2083 |\\n| [FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 2053 |\\n| [paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1999 |\\n| [hegelai/prompttools](https://github.com/hegelai/prompttools) | 1984 |\\n| [mckinsey/vizro](https://github.com/mckinsey/vizro) | 1951 |\\n| [vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1868 |\\n| [dot-agent/openAMS](https://github.com/dot-agent/openAMS) | 1796 |\\n| [explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 1766 |\\n| [AI-Citizen/SolidGPT](https://github.com/AI-Citizen/SolidGPT) | 1761 |\\n| 
[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1696 |\\n| [run-llama/sec-insights](https://github.com/run-llama/sec-insights) | 1654 |\\n| [avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1635 |\\n| [microsoft/WhatTheHack](https://github.com/microsoft/WhatTheHack) | 1629 |\\n| [noahshinn/reflexion](https://github.com/noahshinn/reflexion) | 1625 |\\n| [psychic-api/psychic](https://github.com/psychic-api/psychic) | 1618 |\\n| [Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1611 |\\n| [pinterest/querybook](https://github.com/pinterest/querybook) | 1586 |\\n| [refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1553 |\\n| [jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1537 |\\n| [jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1522 |\\n| [agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1493 |\\n| [ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1484 |\\n| [greshake/llm-security](https://github.com/greshake/llm-security) | 1483 |\\n| [promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 1480 |\\n| [milvus-io/bootcamp](https://github.com/milvus-io/bootcamp) | 1477 |\\n| [richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1475 |\\n| [melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 1428 |\\n| [YORG-AI/Open-Assistant](https://github.com/YORG-AI/Open-Assistant) | 1419 |\\n| [101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1416 |\\n| [jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1408 |\\n| [mmz-001/knowledge\\\\_gpt](https://github.com/mmz-001/knowledge_gpt) | 1398 |\\n| [intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 1387 |\\n| [Azure/azureml-examples](https://github.com/Azure/azureml-examples) | 1385 |\\n| [lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1367 |\\n| [eyurtsev/kor](https://github.com/eyurtsev/kor) | 1355 |\\n| [xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 1325 |\\n| [griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 1323 |\\n| [SuperDuperDB/superduperdb](https://github.com/SuperDuperDB/superduperdb) | 1290 |\\n| [cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 1284 |\\n| [psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 1260 |\\n| [filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 1250 |\\n| [nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 1237 |\\n| [pluralsh/plural](https://github.com/pluralsh/plural) | 1234 |\\n| [cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 1194 |\\n| [LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 1184 |\\n| [poe-platform/server-bot-quick-start](https://github.com/poe-platform/server-bot-quick-start) | 1182 |\\n| [microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1180 |\\n| [juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1171 |\\n| [visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1156 |\\n| [alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 1153 |\\n| [ThousandBirdsInc/chidori](https://github.com/ThousandBirdsInc/chidori) | 1152 |\\n| [irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1137 |\\n| [SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 1083 |\\n| 
[ray-project/llm-applications](https://github.com/ray-project/llm-applications) | 1080 |\\n| [run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 1072 |\\n| [jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 1041 |\\n| [MetaGLM/FinGLM](https://github.com/MetaGLM/FinGLM) | 1035 |\\n| [peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 1020 |\\n| [Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 991 |\\n| [langchain-ai/langserve](https://github.com/langchain-ai/langserve) | 983 |\\n| [THUDM/AgentTuning](https://github.com/THUDM/AgentTuning) | 976 |\\n| [rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 975 |\\n| [codeacme17/examor](https://github.com/codeacme17/examor) | 964 |\\n| [all-in-aigc/gpts-works](https://github.com/all-in-aigc/gpts-works) | 946 |\\n| [Ikaros-521/AI-Vtuber](https://github.com/Ikaros-521/AI-Vtuber) | 946 |\\n| [microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 898 |\\n| [cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 895 |\\n| [ricklamers/shell-ai](https://github.com/ricklamers/shell-ai) | 893 |\\n| [modelscope/modelscope-agent](https://github.com/modelscope/modelscope-agent) | 893 |\\n| [seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 886 |\\n| [ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 880 |\\n| [kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 872 |\\n| [corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 846 |\\n| [hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 841 |\\n| [kreneskyp/ix](https://github.com/kreneskyp/ix) | 821 |\\n| [Link-AGI/AutoAgents](https://github.com/Link-AGI/AutoAgents) | 820 |\\n| [truera/trulens](https://github.com/truera/trulens) | 794 |\\n| [Dataherald/dataherald](https://github.com/Dataherald/dataherald) | 788 |\\n| [sunlabuiuc/PyHealth](https://github.com/sunlabuiuc/PyHealth) | 783 |\\n| [jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 783 |\\n| [pyspark-ai/pyspark-ai](https://github.com/pyspark-ai/pyspark-ai) | 782 |\\n| [confident-ai/deepeval](https://github.com/confident-ai/deepeval) | 780 |\\n| [billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 777 |\\n| [langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 776 |\\n| [akshata29/entaoai](https://github.com/akshata29/entaoai) | 771 |\\n| [LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 770 |\\n| [getmetal/motorhead](https://github.com/getmetal/motorhead) | 768 |\\n| [Dicklesworthstone/swiss\\\\_army\\\\_llama](https://github.com/Dicklesworthstone/swiss_army_llama) | 757 |\\n| [ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 757 |\\n| [msoedov/langcorn](https://github.com/msoedov/langcorn) | 754 |\\n| [e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 753 |\\n| [microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 749 |\\n| [explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 731 |\\n| [MiuLab/Taiwan-LLM](https://github.com/MiuLab/Taiwan-LLM) | 716 |\\n| [whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 702 |\\n| [Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 692 |\\n| [iusztinpaul/hands-on-llms](https://github.com/iusztinpaul/hands-on-llms) | 687 |\\n| 
[safevideo/autollm](https://github.com/safevideo/autollm) | 682 |\\n| [OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 669 |\\n| [NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 663 |\\n| [AILab-CVC/GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | 662 |\\n| [langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 657 |\\n| [yvann-ba/Robby-chatbot](https://github.com/yvann-ba/Robby-chatbot) | 639 |\\n| [alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 635 |\\n| [amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 630 |\\n| [microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 621 |\\n| [aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 616 |\\n| [NeumTry/NeumAI](https://github.com/NeumTry/NeumAI) | 605 |\\n| [namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 599 |\\n| [plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 595 |\\n| [marimo-team/marimo](https://github.com/marimo-team/marimo) | 591 |\\n| [yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 586 |\\n| [xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 584 |\\n| [jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 573 |\\n| [dgarnitz/vectorflow](https://github.com/dgarnitz/vectorflow) | 568 |\\n| [yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 564 |\\n| [daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 563 |\\n| [traceloop/openllmetry](https://github.com/traceloop/openllmetry) | 559 |\\n| [Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 546 |\\n| [michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 545 |\\n| [jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 544 |\\n| [mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 533 |\\n| [marella/chatdocs](https://github.com/marella/chatdocs) | 532 |\\n| [opentensor/bittensor](https://github.com/opentensor/bittensor) | 532 |\\n| [DjangoPeng/openai-quickstart](https://github.com/DjangoPeng/openai-quickstart) | 527 |\\n| [freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 517 |\\n| [sidhq/Multi-GPT](https://github.com/sidhq/Multi-GPT) | 515 |\\n| [alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |\\n| [sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |\\n| [continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |\\n| [steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |\\n| [mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |\\n| [langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |\\n| [logan-markewich/llama\\\\_index\\\\_starter\\\\_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 483 |\\n| [datawhalechina/llm-universe](https://github.com/datawhalechina/llm-universe) | 475 |\\n| [leondz/garak](https://github.com/leondz/garak) | 464 |\\n| [RedisVentures/ArXivChatGuru](https://github.com/RedisVentures/ArXivChatGuru) | 461 |\\n| [Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 455 |\\n| [Aiyu-awa/luna-ai](https://github.com/Aiyu-awa/luna-ai) | 450 |\\n| 
[DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 450 |\\n| [Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 449 |\\n| [poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 447 |\\n| [onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 446 |\\n| [junruxiong/IncarnaMind](https://github.com/junruxiong/IncarnaMind) | 441 |\\n| [CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 441 |\\n| [daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 437 |\\n| [showlab/VLog](https://github.com/showlab/VLog) | 436 |\\n| [wandb/weave](https://github.com/wandb/weave) | 420 |\\n| [QwenLM/Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) | 419 |\\n| [huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 416 |\\n| [jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 411 |\\n| [monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 408 |\\n| [mallorbc/Finetune\\\\_LLMs](https://github.com/mallorbc/Finetune_LLMs) | 406 |\\n| [JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 405 |\\n| [rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 401 |\\n| [langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 398 |\\n| [mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 398 |\\n| [morpheuslord/GPT\\\\_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 391 |\\n| [MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 387 |\\n| [JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 384 |\\n| [mrwadams/attackgen](https://github.com/mrwadams/attackgen) | 381 |\\n| [codefuse-ai/Test-Agent](https://github.com/codefuse-ai/Test-Agent) | 380 |\\n| [personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 379 |\\n| [mosaicml/examples](https://github.com/mosaicml/examples) | 378 |\\n| [steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 370 |\\n| [FlagAI-Open/Aquila2](https://github.com/FlagAI-Open/Aquila2) | 365 |\\n| [Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 365 |\\n| [NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 357 |\\n| [BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 354 |\\n| [lilacai/lilac](https://github.com/lilacai/lilac) | 352 |\\n| [preset-io/promptimize](https://github.com/preset-io/promptimize) | 351 |\\n| [yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 347 |\\n| [andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 346 |\\n| [zhoudaquan/ChatAnything](https://github.com/zhoudaquan/ChatAnything) | 343 |\\n| [rgomezcasas/dotfiles](https://github.com/rgomezcasas/dotfiles) | 343 |\\n| [tigerlab-ai/tiger](https://github.com/tigerlab-ai/tiger) | 342 |\\n| [HumanSignal/label-studio-ml-backend](https://github.com/HumanSignal/label-studio-ml-backend) | 334 |\\n| [nasa-petal/bidara](https://github.com/nasa-petal/bidara) | 334 |\\n| [momegas/megabots](https://github.com/momegas/megabots) | 334 |\\n| [Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 330 |\\n| [CambioML/pykoi](https://github.com/CambioML/pykoi) | 326 |\\n| [Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 326 |\\n| [wandb/edu](https://github.com/wandb/edu) | 326 |\\n| 
[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 324 |\\n| [sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 322 |\\n| [liangwq/Chatglm\\\\_lora\\\\_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 321 |\\n| [ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 320 |\\n| [itamargol/openai](https://github.com/itamargol/openai) | 318 |\\n| [gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 304 |\\n| [SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 302 |\\n| [facebookresearch/personal-timeline](https://github.com/facebookresearch/personal-timeline) | 302 |\\n| [hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 301 |\\n| [Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 300 |\\n| [airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 300 |\\n| [GPT-Fathom/GPT-Fathom](https://github.com/GPT-Fathom/GPT-Fathom) | 299 |\\n| [kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 299 |\\n| [kyegomez/swarms](https://github.com/kyegomez/swarms) | 296 |\\n| [LangStream/langstream](https://github.com/LangStream/langstream) | 295 |\\n| [genia-dev/GeniA](https://github.com/genia-dev/GeniA) | 294 |\\n| [shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 291 |\\n| [TsinghuaDatabaseGroup/DB-GPT](https://github.com/TsinghuaDatabaseGroup/DB-GPT) | 290 |\\n| [conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 283 |\\n| [sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 283 |\\n| [AutoPackAI/beebot](https://github.com/AutoPackAI/beebot) | 282 |\\n| [pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 282 |\\n| [gkamradt/LLMTest\\\\_NeedleInAHaystack](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) | 280 |\\n| [gustavz/DataChad](https://github.com/gustavz/DataChad) | 280 |\\n| [Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 278 |\\n| [hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 275 |\\n| [AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 268 |\\n| [ennucore/clippinator](https://github.com/ennucore/clippinator) | 267 |\\n| [artitw/text2text](https://github.com/artitw/text2text) | 264 |\\n| [anarchy-ai/LLM-VM](https://github.com/anarchy-ai/LLM-VM) | 263 |\\n| [wpydcr/LLM-Kit](https://github.com/wpydcr/LLM-Kit) | 262 |\\n| [streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 262 |\\n| [paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 262 |\\n| [yym68686/ChatGPT-Telegram-Bot](https://github.com/yym68686/ChatGPT-Telegram-Bot) | 261 |\\n| [PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 259 |\\n| [radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 259 |\\n| [ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 259 |\\n| [ml6team/fondant](https://github.com/ml6team/fondant) | 254 |\\n| [bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 254 |\\n| [rahulnyk/knowledge\\\\_graph](https://github.com/rahulnyk/knowledge_graph) | 253 |\\n| [recalign/RecAlign](https://github.com/recalign/RecAlign) | 248 |\\n| [hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 248 |\\n| 
[fetchai/uAgents](https://github.com/fetchai/uAgents) | 247 |\\n| [arthur-ai/bench](https://github.com/arthur-ai/bench) | 247 |\\n| [miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 246 |\\n| [RoboCoachTechnologies/GPT-Synthesizer](https://github.com/RoboCoachTechnologies/GPT-Synthesizer) | 244 |\\n| [langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 242 |\\n| [kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 242 |\\n| [PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 241 |\\n| [stepanogil/autonomous-hr-chatbot](https://github.com/stepanogil/autonomous-hr-chatbot) | 238 |\\n| [WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 236 |\\n| [nexus-stc/stc](https://github.com/nexus-stc/stc) | 235 |\\n| [yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 235 |\\n| [Gentopia-AI/Gentopia](https://github.com/Gentopia-AI/Gentopia) | 235 |\\n| [alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 235 |\\n| [grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 232 |\\n| [shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 232 |\\n| [darrenburns/elia](https://github.com/darrenburns/elia) | 231 |\\n| [orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 231 |\\n| [handrew/browserpilot](https://github.com/handrew/browserpilot) | 226 |\\n| [su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 225 |\\n| [nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 225 |\\n| [dbpunk-labs/octogen](https://github.com/dbpunk-labs/octogen) | 224 |\\n| [langchain-ai/weblangchain](https://github.com/langchain-ai/weblangchain) | 222 |\\n| [CL-lau/SQL-GPT](https://github.com/CL-lau/SQL-GPT) | 222 |\\n| [alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |\\n| [showlab/UniVTG](https://github.com/showlab/UniVTG) | 220 |\\n| [edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 219 |\\n| [hardbyte/qabot](https://github.com/hardbyte/qabot) | 216 |\\n| [microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 215 |\\n| [Azure-Samples/chat-with-your-data-solution-accelerator](https://github.com/Azure-Samples/chat-with-your-data-solution-accelerator) | 214 |\\n| [amadad/agentcy](https://github.com/amadad/agentcy) | 213 |\\n| [snexus/llm-search](https://github.com/snexus/llm-search) | 212 |\\n| [afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 206 |\\n| [plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 205 |\\n| [yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 205 |\\n| [benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 205 |\\n| [voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 204 |\\n| [jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 204 |\\n| [emarco177/ice\\\\_breaker](https://github.com/emarco177/ice_breaker) | 204 |\\n| [tencentmusic/supersonic](https://github.com/tencentmusic/supersonic) | 202 |\\n| [Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 202 |\\n| [blob42/Instrukt](https://github.com/blob42/Instrukt) | 201 |\\n| [langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk) | 200 |\\n| [SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 200 |\\n| [ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 198 |\\n| 
[KMnO4-zx/huanhuan-chat](https://github.com/KMnO4-zx/huanhuan-chat) | 196 |\\n| [Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 192 |\\n| [hongbo-miao/hongbomiao.com](https://github.com/hongbo-miao/hongbomiao.com) | 190 |\\n| [CakeCrusher/openplugin](https://github.com/CakeCrusher/openplugin) | 190 |\\n| [PaddlePaddle/ERNIE-Bot-SDK](https://github.com/PaddlePaddle/ERNIE-Bot-SDK) | 189 |\\n| [retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 189 |\\n| [AmineDiro/cria](https://github.com/AmineDiro/cria) | 187 |\\n| [lancedb/vectordb-recipes](https://github.com/lancedb/vectordb-recipes) | 186 |\\n| [vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 185 |\\n| [aws-ia/ecs-blueprints](https://github.com/aws-ia/ecs-blueprints) | 184 |\\n| [ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 183 |\\n| [MuhammadMoinFaisal/LargeLanguageModelsProjects](https://github.com/MuhammadMoinFaisal/LargeLanguageModelsProjects) | 182 |\\n| [shauryr/S2QA](https://github.com/shauryr/S2QA) | 181 |\\n| [summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 180 |\\n| [NomaDamas/RAGchain](https://github.com/NomaDamas/RAGchain) | 179 |\\n| [pnkvalavala/repochat](https://github.com/pnkvalavala/repochat) | 179 |\\n| [ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 177 |\\n| [fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 177 |\\n| [langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 175 |\\n| [iMagist486/ElasticSearch-Langchain-Chatglm2](https://github.com/iMagist486/ElasticSearch-Langchain-Chatglm2) | 175 |\\n| [limaoyi1/Auto-PPT](https://github.com/limaoyi1/Auto-PPT) | 175 |\\n| [Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 175 |\\n| [morpheuslord/HackBot](https://github.com/morpheuslord/HackBot) | 174 |\\n| [v7labs/benchllm](https://github.com/v7labs/benchllm) | 174 |\\n| [Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 174 |\\n| [dongyh20/Octopus](https://github.com/dongyh20/Octopus) | 173 |\\n| [kimtth/azure-openai-llm-vector-langchain](https://github.com/kimtth/azure-openai-llm-vector-langchain) | 173 |\\n| [mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 173 |\\n| [zilliztech/akcio](https://github.com/zilliztech/akcio) | 172 |\\n| [jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 172 |\\n| [ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 172 |\\n| [joaomdmoura/CrewAI](https://github.com/joaomdmoura/CrewAI) | 170 |\\n| [katanaml/llm-mistral-invoice-cpu](https://github.com/katanaml/llm-mistral-invoice-cpu) | 170 |\\n| [chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 170 |\\n| [mudler/LocalAGI](https://github.com/mudler/LocalAGI) | 167 |\\n| [dssjon/biblos](https://github.com/dssjon/biblos) | 165 |\\n| [kjappelbaum/gptchem](https://github.com/kjappelbaum/gptchem) | 165 |\\n| [xxw1995/chatglm3-finetune](https://github.com/xxw1995/chatglm3-finetune) | 164 |\\n| [ArjanCodes/examples](https://github.com/ArjanCodes/examples) | 163 |\\n| [AIAnytime/Llama2-Medical-Chatbot](https://github.com/AIAnytime/Llama2-Medical-Chatbot) | 163 |\\n| [RCGAI/SimplyRetrieve](https://github.com/RCGAI/SimplyRetrieve) | 162 |\\n| [langchain-ai/langchain-teacher](https://github.com/langchain-ai/langchain-teacher) | 162 |\\n| 
[menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 162 |\\n| [flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 162 |\\n| [homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 161 |\\n| [jiran214/langup-ai](https://github.com/jiran214/langup-ai) | 160 |\\n| [JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 160 |\\n| [GoogleCloudPlatform/data-analytics-golden-demo](https://github.com/GoogleCloudPlatform/data-analytics-golden-demo) | 159 |\\n| [positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 159 |\\n| [luisroque/large\\\\_laguage\\\\_models](https://github.com/luisroque/large_laguage_models) | 159 |\\n| [mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 158 |\\n| [wandb/wandbot](https://github.com/wandb/wandbot) | 158 |\\n| [elastic/elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) | 157 |\\n| [shroominic/funcchain](https://github.com/shroominic/funcchain) | 157 |\\n| [deeppavlov/dream](https://github.com/deeppavlov/dream) | 156 |\\n| [mluogh/eastworld](https://github.com/mluogh/eastworld) | 154 |\\n| [georgesung/llm\\\\_qlora](https://github.com/georgesung/llm_qlora) | 154 |\\n| [RUC-GSAI/YuLan-Rec](https://github.com/RUC-GSAI/YuLan-Rec) | 153 |\\n| [KylinC/ChatFinance](https://github.com/KylinC/ChatFinance) | 152 |\\n| [Dicklesworthstone/llama2\\\\_aided\\\\_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 152 |\\n| [c0sogi/LLMChat](https://github.com/c0sogi/LLMChat) | 152 |\\n| [eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 152 |\\n| [ErikBjare/gptme](https://github.com/ErikBjare/gptme) | 152 |\\n| [Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 152 |\\n| [RoboCoachTechnologies/ROScribe](https://github.com/RoboCoachTechnologies/ROScribe) | 151 |\\n| [Aggregate-Intellect/sherpa](https://github.com/Aggregate-Intellect/sherpa) | 151 |\\n| [3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 151 |\\n| [tangqiaoyu/ToolAlpaca](https://github.com/tangqiaoyu/ToolAlpaca) | 150 |\\n| [kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 150 |\\n| [mallahyari/drqa](https://github.com/mallahyari/drqa) | 150 |\\n| [MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 149 |\\n| [Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |\\n| [realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |\\n| [ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |\\n| [solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |\\n| [aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |\\n| [Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |\\n| [menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 146 |\\n| [trancethehuman/entities-extraction-web-scraper](https://github.com/trancethehuman/entities-extraction-web-scraper) | 144 |\\n| [peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 144 |\\n| [grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 144 |\\n| [gh18l/CrawlGPT](https://github.com/gh18l/CrawlGPT) | 142 |\\n| [langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 142 |\\n| [yasyf/summ](https://github.com/yasyf/summ) | 141 |\\n| 
[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 141 |\\n| [hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |\\n| [jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 139 |\\n| [zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 139 |\\n| [jlonge4/local\\\\_llama](https://github.com/jlonge4/local_llama) | 139 |\\n| [smyja/blackmaria](https://github.com/smyja/blackmaria) | 138 |\\n| [ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 137 |\\n| [log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 137 |\\n| [davila7/file-gpt](https://github.com/davila7/file-gpt) | 137 |\\n| [dcaribou/transfermarkt-datasets](https://github.com/dcaribou/transfermarkt-datasets) | 136 |\\n| [ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 135 |\\n| [Undertone0809/promptulate](https://github.com/Undertone0809/promptulate) | 134 |\\n| [fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 134 |\\n| [run-llama/ai-engineer-workshop](https://github.com/run-llama/ai-engineer-workshop) | 133 |\\n| [definitive-io/code-indexer-loop](https://github.com/definitive-io/code-indexer-loop) | 131 |\\n| [mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 131 |\\n| [baidubce/bce-qianfan-sdk](https://github.com/baidubce/bce-qianfan-sdk) | 130 |\\n| [Ngonie-x/langchain\\\\_csv](https://github.com/Ngonie-x/langchain_csv) | 130 |\\n| [IvanIsCoding/ResuLLMe](https://github.com/IvanIsCoding/ResuLLMe) | 130 |\\n| [AnchoringAI/anchoring-ai](https://github.com/AnchoringAI/anchoring-ai) | 129 |\\n| [Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 128 |\\n| [athina-ai/athina-sdk](https://github.com/athina-ai/athina-sdk) | 126 |\\n| [thunlp/ChatEval](https://github.com/thunlp/ChatEval) | 126 |\\n| [prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 126 |\\n| [vietanhdev/pautobot](https://github.com/vietanhdev/pautobot) | 125 |\\n| [awslabs/generative-ai-cdk-constructs](https://github.com/awslabs/generative-ai-cdk-constructs) | 124 |\\n| [sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 124 |\\n| [rabbitmetrics/langchain-13-min](https://github.com/rabbitmetrics/langchain-13-min) | 124 |\\n| [AutoLLM/AutoAgents](https://github.com/AutoLLM/AutoAgents) | 122 |\\n| [nicknochnack/Nopenai](https://github.com/nicknochnack/Nopenai) | 122 |\\n| [wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 122 |\\n| [dotvignesh/PDFChat](https://github.com/dotvignesh/PDFChat) | 122 |\\n| [topoteretes/PromethAI-Backend](https://github.com/topoteretes/PromethAI-Backend) | 121 |\\n| [nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 121 |\\n| [vishwasg217/finsight](https://github.com/vishwasg217/finsight) | 120 |\\n| [snap-stanford/MLAgentBench](https://github.com/snap-stanford/MLAgentBench) | 120 |\\n| [Azure/app-service-linux-docs](https://github.com/Azure/app-service-linux-docs) | 120 |\\n| [nyanp/chat2plot](https://github.com/nyanp/chat2plot) | 120 |\\n| [ant4g0nist/polar](https://github.com/ant4g0nist/polar) | 119 |\\n| [aws-samples/cdk-eks-blueprints-patterns](https://github.com/aws-samples/cdk-eks-blueprints-patterns) | 119 |\\n| [aws-samples/amazon-kendra-langchain-extensions](https://github.com/aws-samples/amazon-kendra-langchain-extensions) | 119 |\\n| [Xueheng-Li/SynologyChatbotGPT](https://github.com/Xueheng-Li/SynologyChatbotGPT) 
| 119 |\\n| [CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 117 |\\n| [Lin-jun-xiang/docGPT-langchain](https://github.com/Lin-jun-xiang/docGPT-langchain) | 117 |\\n| [ademakdogan/ChatSQL](https://github.com/ademakdogan/ChatSQL) | 116 |\\n| [aniketmaurya/llm-inference](https://github.com/aniketmaurya/llm-inference) | 115 |\\n| [xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 115 |\\n| [cmooredev/RepoReader](https://github.com/cmooredev/RepoReader) | 115 |\\n| [abi/autocommit](https://github.com/abi/autocommit) | 115 |\\n| [MIDORIBIN/langchain-gpt4free](https://github.com/MIDORIBIN/langchain-gpt4free) | 114 |\\n| [finaldie/auto-news](https://github.com/finaldie/auto-news) | 114 |\\n| [Anil-matcha/Youtube-to-chatbot](https://github.com/Anil-matcha/Youtube-to-chatbot) | 114 |\\n| [avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot) | 114 |\\n| [Capsize-Games/airunner](https://github.com/Capsize-Games/airunner) | 113 |\\n| [atisharma/llama\\\\_farm](https://github.com/atisharma/llama_farm) | 113 |\\n| [mbchang/data-driven-characters](https://github.com/mbchang/data-driven-characters) | 112 |\\n| [fiddler-labs/fiddler-auditor](https://github.com/fiddler-labs/fiddler-auditor) | 112 |\\n| [dirkjbreeuwer/gpt-automated-web-scraper](https://github.com/dirkjbreeuwer/gpt-automated-web-scraper) | 111 |\\n| [Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding](https://github.com/Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding) | 111 |\\n| [hwchase17/langchain-gradio-template](https://github.com/hwchase17/langchain-gradio-template) | 111 |\\n| [artas728/spelltest](https://github.com/artas728/spelltest) | 110 |\\n| [NVIDIA/GenerativeAIExamples](https://github.com/NVIDIA/GenerativeAIExamples) | 109 |\\n| [Azure/aistudio-copilot-sample](https://github.com/Azure/aistudio-copilot-sample) | 108 |\\n| [codefuse-ai/codefuse-chatbot](https://github.com/codefuse-ai/codefuse-chatbot) | 108 |\\n| [apirrone/Memento](https://github.com/apirrone/Memento) | 108 |\\n| [e-johnstonn/GPT-Doc-Summarizer](https://github.com/e-johnstonn/GPT-Doc-Summarizer) | 108 |\\n| [salesforce/BOLAA](https://github.com/salesforce/BOLAA) | 107 |\\n| [Erol444/gpt4-openai-api](https://github.com/Erol444/gpt4-openai-api) | 106 |\\n| [linjungz/chat-with-your-doc](https://github.com/linjungz/chat-with-your-doc) | 106 |\\n| [crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 106 |\\n| [panaverse/learn-generative-ai](https://github.com/panaverse/learn-generative-ai) | 105 |\\n| [Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |\\n| [malywut/gpt\\\\_examples](https://github.com/malywut/gpt_examples) | 105 |\\n| [ritun16/chain-of-verification](https://github.com/ritun16/chain-of-verification) | 104 |\\n| [langchain-ai/langchain-benchmarks](https://github.com/langchain-ai/langchain-benchmarks) | 104 |\\n| [lightninglabs/LangChainBitcoin](https://github.com/lightninglabs/LangChainBitcoin) | 104 |\\n| [flepied/second-brain-agent](https://github.com/flepied/second-brain-agent) | 103 |\\n| [llmapp/openai.mini](https://github.com/llmapp/openai.mini) | 102 |\\n| [gimlet-ai/tddGPT](https://github.com/gimlet-ai/tddGPT) | 102 |\\n| [jlonge4/gpt\\\\_chatwithPDF](https://github.com/jlonge4/gpt_chatwithPDF) | 102 |\\n| [agentification/RAFA\\\\_code](https://github.com/agentification/RAFA_code) | 101 |\\n| [pacman100/DHS-LLM-Workshop](https://github.com/pacman100/DHS-LLM-Workshop) | 101 |\\n| 
[aws-samples/private-llm-qa-bot](https://github.com/aws-samples/private-llm-qa-bot) | 101 |\\n\\n_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)\\n_\\n\\n`github-dependents-info --repo \"langchain-ai/langchain\" --markdownfile dependents.md --minstars 100 --sort stars`\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/dependents.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nDependents\\n==========\\n\\nDependents stats for `langchain-ai/langchain`\\n\\n[![](https://img.shields.io/static/v1?label=Used%20by&message=41717&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)\\n [![](https://img.shields.io/static/v1?label=Used%20by%20(public)](https://github.com/langchain-ai/langchain/network/dependents)\\n [![](https://img.shields.io/static/v1?label=Used%20by%20(private)](https://github.com/langchain-ai/langchain/network/dependents)\\n\\n\\\\[update: `2023-12-08`; only dependent repositories with Stars > 100\\\\]\\n\\n| 
Repository | Stars |\\n| --- | --- |\\n| [AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 46514 |\\n| [imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 44439 |\\n| [LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 35906 |\\n| [hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 35528 |\\n| [moymix/TaskMatrix](https://github.com/moymix/TaskMatrix) | 34342 |\\n| [geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 31126 |\\n| [streamlit/streamlit](https://github.com/streamlit/streamlit) | 28911 |\\n| [reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 27833 |\\n| [StanGirard/quivr](https://github.com/StanGirard/quivr) | 26032 |\\n| [OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 24946 |\\n| [run-llama/llama\\\\_index](https://github.com/run-llama/llama_index) | 24859 |\\n| [jmorganca/ollama](https://github.com/jmorganca/ollama) | 20849 |\\n| [openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 20249 |\\n| [chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 19305 |\\n| [mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 19172 |\\n| [PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 17528 |\\n| [cube-js/cube](https://github.com/cube-js/cube) | 16575 |\\n| [mlflow/mlflow](https://github.com/mlflow/mlflow) | 16000 |\\n| [mudler/LocalAI](https://github.com/mudler/LocalAI) | 14067 |\\n| [logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 13679 |\\n| [GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 13648 |\\n| [arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 13423 |\\n| [openai/evals](https://github.com/openai/evals) | 12649 |\\n| [airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 12460 |\\n| [langgenius/dify](https://github.com/langgenius/dify) | 11859 |\\n| [databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10672 |\\n| [AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9437 |\\n| [langchain-ai/langchainjs](https://github.com/langchain-ai/langchainjs) | 9227 |\\n| [gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 9203 |\\n| [aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples) | 9079 |\\n| [h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 8945 |\\n| [PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 7550 |\\n| [bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 6957 |\\n| [THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6801 |\\n| [microsoft/promptflow](https://github.com/microsoft/promptflow) | 6776 |\\n| [cpacker/MemGPT](https://github.com/cpacker/MemGPT) | 6642 |\\n| [joshpxyne/gpt-migrate](https://github.com/joshpxyne/gpt-migrate) | 6482 |\\n| [zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 6037 |\\n| [embedchain/embedchain](https://github.com/embedchain/embedchain) | 6023 |\\n| [mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 6019 |\\n| [assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 5936 |\\n| [sweepai/sweep](https://github.com/sweepai/sweep) | 5855 |\\n| [wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5766 |\\n| [zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 5710 |\\n| [pdm-project/pdm](https://github.com/pdm-project/pdm) | 5665 |\\n| [GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 5568 |\\n| 
[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 5507 |\\n| [Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 5501 |\\n| [facebookresearch/llama-recipes](https://github.com/facebookresearch/llama-recipes) | 5477 |\\n| [serge-chat/serge](https://github.com/serge-chat/serge) | 5221 |\\n| [run-llama/rags](https://github.com/run-llama/rags) | 4916 |\\n| [openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4870 |\\n| [danswer-ai/danswer](https://github.com/danswer-ai/danswer) | 4774 |\\n| [langchain-ai/opengpts](https://github.com/langchain-ai/opengpts) | 4709 |\\n| [postgresml/postgresml](https://github.com/postgresml/postgresml) | 4639 |\\n| [MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 4582 |\\n| [intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4581 |\\n| [yihong0618/xiaogpt](https://github.com/yihong0618/xiaogpt) | 4359 |\\n| [RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 4357 |\\n| [Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 4317 |\\n| [madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4289 |\\n| [apache/nifi](https://github.com/apache/nifi) | 4098 |\\n| [langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 4091 |\\n| [aiwaves-cn/agents](https://github.com/aiwaves-cn/agents) | 4073 |\\n| [krishnaik06/The-Grand-Complete-Data-Science-Materials](https://github.com/krishnaik06/The-Grand-Complete-Data-Science-Materials) | 4065 |\\n| [khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 4016 |\\n| [Azure/azure-sdk-for-python](https://github.com/Azure/azure-sdk-for-python) | 3941 |\\n| [PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3915 |\\n| [OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 3799 |\\n| [marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3771 |\\n| [kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3688 |\\n| [Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 3543 |\\n| [llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3515 |\\n| [shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 3425 |\\n| [openchatai/OpenCopilot](https://github.com/openchatai/OpenCopilot) | 3418 |\\n| [josStorer/RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | 3297 |\\n| [whitead/paper-qa](https://github.com/whitead/paper-qa) | 3280 |\\n| [homanp/superagent](https://github.com/homanp/superagent) | 3258 |\\n| [ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 3199 |\\n| [OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 3099 |\\n| [project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 3090 |\\n| [OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2989 |\\n| [xlang-ai/OpenAgents](https://github.com/xlang-ai/OpenAgents) | 2825 |\\n| [dataelement/bisheng](https://github.com/dataelement/bisheng) | 2797 |\\n| [Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2784 |\\n| [OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2734 |\\n| [run-llama/llama-hub](https://github.com/run-llama/llama-hub) | 2721 |\\n| [SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2647 |\\n| [NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 2637 |\\n| [X-D-Lab/LangChain-ChatGLM-Webui](https://github.com/X-D-Lab/LangChain-ChatGLM-Webui) | 2532 
|\\n| [GerevAI/gerev](https://github.com/GerevAI/gerev) | 2517 |\\n| [keephq/keep](https://github.com/keephq/keep) | 2448 |\\n| [yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2397 |\\n| [OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2324 |\\n| [IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2241 |\\n| [YiVal/YiVal](https://github.com/YiVal/YiVal) | 2232 |\\n| [jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 2189 |\\n| [Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 2136 |\\n| [microsoft/TaskWeaver](https://github.com/microsoft/TaskWeaver) | 2126 |\\n| [hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 2083 |\\n| [FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 2053 |\\n| [paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1999 |\\n| [hegelai/prompttools](https://github.com/hegelai/prompttools) | 1984 |\\n| [mckinsey/vizro](https://github.com/mckinsey/vizro) | 1951 |\\n| [vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1868 |\\n| [dot-agent/openAMS](https://github.com/dot-agent/openAMS) | 1796 |\\n| [explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 1766 |\\n| [AI-Citizen/SolidGPT](https://github.com/AI-Citizen/SolidGPT) | 1761 |\\n| [Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1696 |\\n| [run-llama/sec-insights](https://github.com/run-llama/sec-insights) | 1654 |\\n| [avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1635 |\\n| [microsoft/WhatTheHack](https://github.com/microsoft/WhatTheHack) | 1629 |\\n| [noahshinn/reflexion](https://github.com/noahshinn/reflexion) | 1625 |\\n| [psychic-api/psychic](https://github.com/psychic-api/psychic) | 1618 |\\n| [Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1611 |\\n| [pinterest/querybook](https://github.com/pinterest/querybook) | 1586 |\\n| [refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1553 |\\n| [jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1537 |\\n| [jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1522 |\\n| [agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1493 |\\n| [ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1484 |\\n| [greshake/llm-security](https://github.com/greshake/llm-security) | 1483 |\\n| [promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 1480 |\\n| [milvus-io/bootcamp](https://github.com/milvus-io/bootcamp) | 1477 |\\n| [richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1475 |\\n| [melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 1428 |\\n| [YORG-AI/Open-Assistant](https://github.com/YORG-AI/Open-Assistant) | 1419 |\\n| [101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1416 |\\n| [jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1408 |\\n| [mmz-001/knowledge\\\\_gpt](https://github.com/mmz-001/knowledge_gpt) | 1398 |\\n| [intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 1387 |\\n| [Azure/azureml-examples](https://github.com/Azure/azureml-examples) | 1385 |\\n| [lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1367 |\\n| [eyurtsev/kor](https://github.com/eyurtsev/kor) | 1355 |\\n| 
[xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 1325 |\\n| [griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 1323 |\\n| [SuperDuperDB/superduperdb](https://github.com/SuperDuperDB/superduperdb) | 1290 |\\n| [cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 1284 |\\n| [psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 1260 |\\n| [filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 1250 |\\n| [nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 1237 |\\n| [pluralsh/plural](https://github.com/pluralsh/plural) | 1234 |\\n| [cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 1194 |\\n| [LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 1184 |\\n| [poe-platform/server-bot-quick-start](https://github.com/poe-platform/server-bot-quick-start) | 1182 |\\n| [microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1180 |\\n| [juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1171 |\\n| [visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1156 |\\n| [alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 1153 |\\n| [ThousandBirdsInc/chidori](https://github.com/ThousandBirdsInc/chidori) | 1152 |\\n| [irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1137 |\\n| [SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 1083 |\\n| [ray-project/llm-applications](https://github.com/ray-project/llm-applications) | 1080 |\\n| [run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 1072 |\\n| [jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 1041 |\\n| [MetaGLM/FinGLM](https://github.com/MetaGLM/FinGLM) | 1035 |\\n| [peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 1020 |\\n| [Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 991 |\\n| [langchain-ai/langserve](https://github.com/langchain-ai/langserve) | 983 |\\n| [THUDM/AgentTuning](https://github.com/THUDM/AgentTuning) | 976 |\\n| [rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 975 |\\n| [codeacme17/examor](https://github.com/codeacme17/examor) | 964 |\\n| [all-in-aigc/gpts-works](https://github.com/all-in-aigc/gpts-works) | 946 |\\n| [Ikaros-521/AI-Vtuber](https://github.com/Ikaros-521/AI-Vtuber) | 946 |\\n| [microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 898 |\\n| [cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 895 |\\n| [ricklamers/shell-ai](https://github.com/ricklamers/shell-ai) | 893 |\\n| [modelscope/modelscope-agent](https://github.com/modelscope/modelscope-agent) | 893 |\\n| [seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 886 |\\n| [ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 880 |\\n| [kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 872 |\\n| [corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 846 |\\n| [hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 841 |\\n| [kreneskyp/ix](https://github.com/kreneskyp/ix) | 821 |\\n| [Link-AGI/AutoAgents](https://github.com/Link-AGI/AutoAgents) | 820 |\\n| [truera/trulens](https://github.com/truera/trulens) | 794 |\\n| [Dataherald/dataherald](https://github.com/Dataherald/dataherald) | 788 |\\n| [sunlabuiuc/PyHealth](https://github.com/sunlabuiuc/PyHealth) | 783 |\\n| 
[jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 783 |\\n| [pyspark-ai/pyspark-ai](https://github.com/pyspark-ai/pyspark-ai) | 782 |\\n| [confident-ai/deepeval](https://github.com/confident-ai/deepeval) | 780 |\\n| [billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 777 |\\n| [langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 776 |\\n| [akshata29/entaoai](https://github.com/akshata29/entaoai) | 771 |\\n| [LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 770 |\\n| [getmetal/motorhead](https://github.com/getmetal/motorhead) | 768 |\\n| [Dicklesworthstone/swiss\\\\_army\\\\_llama](https://github.com/Dicklesworthstone/swiss_army_llama) | 757 |\\n| [ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 757 |\\n| [msoedov/langcorn](https://github.com/msoedov/langcorn) | 754 |\\n| [e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 753 |\\n| [microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 749 |\\n| [explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 731 |\\n| [MiuLab/Taiwan-LLM](https://github.com/MiuLab/Taiwan-LLM) | 716 |\\n| [whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 702 |\\n| [Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 692 |\\n| [iusztinpaul/hands-on-llms](https://github.com/iusztinpaul/hands-on-llms) | 687 |\\n| [safevideo/autollm](https://github.com/safevideo/autollm) | 682 |\\n| [OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 669 |\\n| [NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 663 |\\n| [AILab-CVC/GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | 662 |\\n| [langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 657 |\\n| [yvann-ba/Robby-chatbot](https://github.com/yvann-ba/Robby-chatbot) | 639 |\\n| [alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 635 |\\n| [amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 630 |\\n| [microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 621 |\\n| [aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 616 |\\n| [NeumTry/NeumAI](https://github.com/NeumTry/NeumAI) | 605 |\\n| [namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 599 |\\n| [plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 595 |\\n| [marimo-team/marimo](https://github.com/marimo-team/marimo) | 591 |\\n| [yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 586 |\\n| [xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 584 |\\n| [jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 573 |\\n| [dgarnitz/vectorflow](https://github.com/dgarnitz/vectorflow) | 568 |\\n| [yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 564 |\\n| [daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 563 |\\n| [traceloop/openllmetry](https://github.com/traceloop/openllmetry) | 559 |\\n| [Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 546 |\\n| [michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 545 |\\n| [jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 544 |\\n| [mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 533 |\\n| 
[marella/chatdocs](https://github.com/marella/chatdocs) | 532 |\\n| [opentensor/bittensor](https://github.com/opentensor/bittensor) | 532 |\\n| [DjangoPeng/openai-quickstart](https://github.com/DjangoPeng/openai-quickstart) | 527 |\\n| [freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 517 |\\n| [sidhq/Multi-GPT](https://github.com/sidhq/Multi-GPT) | 515 |\\n| [alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |\\n| [sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |\\n| [continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |\\n| [steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |\\n| [mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |\\n| [langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |\\n| [logan-markewich/llama\\\\_index\\\\_starter\\\\_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 483 |\\n| [datawhalechina/llm-universe](https://github.com/datawhalechina/llm-universe) | 475 |\\n| [leondz/garak](https://github.com/leondz/garak) | 464 |\\n| [RedisVentures/ArXivChatGuru](https://github.com/RedisVentures/ArXivChatGuru) | 461 |\\n| [Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 455 |\\n| [Aiyu-awa/luna-ai](https://github.com/Aiyu-awa/luna-ai) | 450 |\\n| [DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 450 |\\n| [Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 449 |\\n| [poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 447 |\\n| [onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 446 |\\n| [junruxiong/IncarnaMind](https://github.com/junruxiong/IncarnaMind) | 441 |\\n| [CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 441 |\\n| [daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 437 |\\n| [showlab/VLog](https://github.com/showlab/VLog) | 436 |\\n| [wandb/weave](https://github.com/wandb/weave) | 420 |\\n| [QwenLM/Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) | 419 |\\n| [huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 416 |\\n| [jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 411 |\\n| [monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 408 |\\n| [mallorbc/Finetune\\\\_LLMs](https://github.com/mallorbc/Finetune_LLMs) | 406 |\\n| [JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 405 |\\n| [rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 401 |\\n| [langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 398 |\\n| [mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 398 |\\n| [morpheuslord/GPT\\\\_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 391 |\\n| [MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 387 |\\n| [JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 384 |\\n| [mrwadams/attackgen](https://github.com/mrwadams/attackgen) | 381 |\\n| [codefuse-ai/Test-Agent](https://github.com/codefuse-ai/Test-Agent) | 380 |\\n| [personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 379 |\\n| [mosaicml/examples](https://github.com/mosaicml/examples) | 378 |\\n| [steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 
370 |\\n| [FlagAI-Open/Aquila2](https://github.com/FlagAI-Open/Aquila2) | 365 |\\n| [Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 365 |\\n| [NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 357 |\\n| [BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 354 |\\n| [lilacai/lilac](https://github.com/lilacai/lilac) | 352 |\\n| [preset-io/promptimize](https://github.com/preset-io/promptimize) | 351 |\\n| [yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 347 |\\n| [andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 346 |\\n| [zhoudaquan/ChatAnything](https://github.com/zhoudaquan/ChatAnything) | 343 |\\n| [rgomezcasas/dotfiles](https://github.com/rgomezcasas/dotfiles) | 343 |\\n| [tigerlab-ai/tiger](https://github.com/tigerlab-ai/tiger) | 342 |\\n| [HumanSignal/label-studio-ml-backend](https://github.com/HumanSignal/label-studio-ml-backend) | 334 |\\n| [nasa-petal/bidara](https://github.com/nasa-petal/bidara) | 334 |\\n| [momegas/megabots](https://github.com/momegas/megabots) | 334 |\\n| [Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 330 |\\n| [CambioML/pykoi](https://github.com/CambioML/pykoi) | 326 |\\n| [Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 326 |\\n| [wandb/edu](https://github.com/wandb/edu) | 326 |\\n| [Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 324 |\\n| [sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 322 |\\n| [liangwq/Chatglm\\\\_lora\\\\_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 321 |\\n| [ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 320 |\\n| [itamargol/openai](https://github.com/itamargol/openai) | 318 |\\n| [gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 304 |\\n| [SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 302 |\\n| [facebookresearch/personal-timeline](https://github.com/facebookresearch/personal-timeline) | 302 |\\n| [hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 301 |\\n| [Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 300 |\\n| [airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 300 |\\n| [GPT-Fathom/GPT-Fathom](https://github.com/GPT-Fathom/GPT-Fathom) | 299 |\\n| [kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 299 |\\n| [kyegomez/swarms](https://github.com/kyegomez/swarms) | 296 |\\n| [LangStream/langstream](https://github.com/LangStream/langstream) | 295 |\\n| [genia-dev/GeniA](https://github.com/genia-dev/GeniA) | 294 |\\n| [shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 291 |\\n| [TsinghuaDatabaseGroup/DB-GPT](https://github.com/TsinghuaDatabaseGroup/DB-GPT) | 290 |\\n| [conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 283 |\\n| [sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 283 |\\n| [AutoPackAI/beebot](https://github.com/AutoPackAI/beebot) | 282 |\\n| [pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 282 |\\n| [gkamradt/LLMTest\\\\_NeedleInAHaystack](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) | 280 |\\n| [gustavz/DataChad](https://github.com/gustavz/DataChad) | 280 |\\n| [Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 278 |\\n| 
[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 275 |\\n| [AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 268 |\\n| [ennucore/clippinator](https://github.com/ennucore/clippinator) | 267 |\\n| [artitw/text2text](https://github.com/artitw/text2text) | 264 |\\n| [anarchy-ai/LLM-VM](https://github.com/anarchy-ai/LLM-VM) | 263 |\\n| [wpydcr/LLM-Kit](https://github.com/wpydcr/LLM-Kit) | 262 |\\n| [streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 262 |\\n| [paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 262 |\\n| [yym68686/ChatGPT-Telegram-Bot](https://github.com/yym68686/ChatGPT-Telegram-Bot) | 261 |\\n| [PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 259 |\\n| [radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 259 |\\n| [ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 259 |\\n| [ml6team/fondant](https://github.com/ml6team/fondant) | 254 |\\n| [bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 254 |\\n| [rahulnyk/knowledge\\\\_graph](https://github.com/rahulnyk/knowledge_graph) | 253 |\\n| [recalign/RecAlign](https://github.com/recalign/RecAlign) | 248 |\\n| [hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 248 |\\n| [fetchai/uAgents](https://github.com/fetchai/uAgents) | 247 |\\n| [arthur-ai/bench](https://github.com/arthur-ai/bench) | 247 |\\n| [miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 246 |\\n| [RoboCoachTechnologies/GPT-Synthesizer](https://github.com/RoboCoachTechnologies/GPT-Synthesizer) | 244 |\\n| [langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 242 |\\n| [kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 242 |\\n| [PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 241 |\\n| [stepanogil/autonomous-hr-chatbot](https://github.com/stepanogil/autonomous-hr-chatbot) | 238 |\\n| [WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 236 |\\n| [nexus-stc/stc](https://github.com/nexus-stc/stc) | 235 |\\n| [yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 235 |\\n| [Gentopia-AI/Gentopia](https://github.com/Gentopia-AI/Gentopia) | 235 |\\n| [alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 235 |\\n| [grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 232 |\\n| [shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 232 |\\n| [darrenburns/elia](https://github.com/darrenburns/elia) | 231 |\\n| [orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 231 |\\n| [handrew/browserpilot](https://github.com/handrew/browserpilot) | 226 |\\n| [su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 225 |\\n| [nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 225 |\\n| [dbpunk-labs/octogen](https://github.com/dbpunk-labs/octogen) | 224 |\\n| [langchain-ai/weblangchain](https://github.com/langchain-ai/weblangchain) | 222 |\\n| [CL-lau/SQL-GPT](https://github.com/CL-lau/SQL-GPT) | 222 |\\n| [alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |\\n| [showlab/UniVTG](https://github.com/showlab/UniVTG) | 220 |\\n| [edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 219 |\\n| [hardbyte/qabot](https://github.com/hardbyte/qabot) | 216 |\\n| 
[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 215 |\\n| [Azure-Samples/chat-with-your-data-solution-accelerator](https://github.com/Azure-Samples/chat-with-your-data-solution-accelerator) | 214 |\\n| [amadad/agentcy](https://github.com/amadad/agentcy) | 213 |\\n| [snexus/llm-search](https://github.com/snexus/llm-search) | 212 |\\n| [afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 206 |\\n| [plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 205 |\\n| [yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 205 |\\n| [benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 205 |\\n| [voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 204 |\\n| [jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 204 |\\n| [emarco177/ice\\\\_breaker](https://github.com/emarco177/ice_breaker) | 204 |\\n| [tencentmusic/supersonic](https://github.com/tencentmusic/supersonic) | 202 |\\n| [Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 202 |\\n| [blob42/Instrukt](https://github.com/blob42/Instrukt) | 201 |\\n| [langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk) | 200 |\\n| [SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 200 |\\n| [ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 198 |\\n| [KMnO4-zx/huanhuan-chat](https://github.com/KMnO4-zx/huanhuan-chat) | 196 |\\n| [Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 192 |\\n| [hongbo-miao/hongbomiao.com](https://github.com/hongbo-miao/hongbomiao.com) | 190 |\\n| [CakeCrusher/openplugin](https://github.com/CakeCrusher/openplugin) | 190 |\\n| [PaddlePaddle/ERNIE-Bot-SDK](https://github.com/PaddlePaddle/ERNIE-Bot-SDK) | 189 |\\n| [retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 189 |\\n| [AmineDiro/cria](https://github.com/AmineDiro/cria) | 187 |\\n| [lancedb/vectordb-recipes](https://github.com/lancedb/vectordb-recipes) | 186 |\\n| [vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 185 |\\n| [aws-ia/ecs-blueprints](https://github.com/aws-ia/ecs-blueprints) | 184 |\\n| [ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 183 |\\n| [MuhammadMoinFaisal/LargeLanguageModelsProjects](https://github.com/MuhammadMoinFaisal/LargeLanguageModelsProjects) | 182 |\\n| [shauryr/S2QA](https://github.com/shauryr/S2QA) | 181 |\\n| [summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 180 |\\n| [NomaDamas/RAGchain](https://github.com/NomaDamas/RAGchain) | 179 |\\n| [pnkvalavala/repochat](https://github.com/pnkvalavala/repochat) | 179 |\\n| [ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 177 |\\n| [fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 177 |\\n| [langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 175 |\\n| [iMagist486/ElasticSearch-Langchain-Chatglm2](https://github.com/iMagist486/ElasticSearch-Langchain-Chatglm2) | 175 |\\n| [limaoyi1/Auto-PPT](https://github.com/limaoyi1/Auto-PPT) | 175 |\\n| [Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 175 |\\n| [morpheuslord/HackBot](https://github.com/morpheuslord/HackBot) | 174 |\\n| [v7labs/benchllm](https://github.com/v7labs/benchllm) | 174 |\\n| [Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 174 |\\n| 
[dongyh20/Octopus](https://github.com/dongyh20/Octopus) | 173 |\\n| [kimtth/azure-openai-llm-vector-langchain](https://github.com/kimtth/azure-openai-llm-vector-langchain) | 173 |\\n| [mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 173 |\\n| [zilliztech/akcio](https://github.com/zilliztech/akcio) | 172 |\\n| [jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 172 |\\n| [ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 172 |\\n| [joaomdmoura/CrewAI](https://github.com/joaomdmoura/CrewAI) | 170 |\\n| [katanaml/llm-mistral-invoice-cpu](https://github.com/katanaml/llm-mistral-invoice-cpu) | 170 |\\n| [chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 170 |\\n| [mudler/LocalAGI](https://github.com/mudler/LocalAGI) | 167 |\\n| [dssjon/biblos](https://github.com/dssjon/biblos) | 165 |\\n| [kjappelbaum/gptchem](https://github.com/kjappelbaum/gptchem) | 165 |\\n| [xxw1995/chatglm3-finetune](https://github.com/xxw1995/chatglm3-finetune) | 164 |\\n| [ArjanCodes/examples](https://github.com/ArjanCodes/examples) | 163 |\\n| [AIAnytime/Llama2-Medical-Chatbot](https://github.com/AIAnytime/Llama2-Medical-Chatbot) | 163 |\\n| [RCGAI/SimplyRetrieve](https://github.com/RCGAI/SimplyRetrieve) | 162 |\\n| [langchain-ai/langchain-teacher](https://github.com/langchain-ai/langchain-teacher) | 162 |\\n| [menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 162 |\\n| [flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 162 |\\n| [homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 161 |\\n| [jiran214/langup-ai](https://github.com/jiran214/langup-ai) | 160 |\\n| [JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 160 |\\n| [GoogleCloudPlatform/data-analytics-golden-demo](https://github.com/GoogleCloudPlatform/data-analytics-golden-demo) | 159 |\\n| [positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 159 |\\n| [luisroque/large\\\\_laguage\\\\_models](https://github.com/luisroque/large_laguage_models) | 159 |\\n| [mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 158 |\\n| [wandb/wandbot](https://github.com/wandb/wandbot) | 158 |\\n| [elastic/elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) | 157 |\\n| [shroominic/funcchain](https://github.com/shroominic/funcchain) | 157 |\\n| [deeppavlov/dream](https://github.com/deeppavlov/dream) | 156 |\\n| [mluogh/eastworld](https://github.com/mluogh/eastworld) | 154 |\\n| [georgesung/llm\\\\_qlora](https://github.com/georgesung/llm_qlora) | 154 |\\n| [RUC-GSAI/YuLan-Rec](https://github.com/RUC-GSAI/YuLan-Rec) | 153 |\\n| [KylinC/ChatFinance](https://github.com/KylinC/ChatFinance) | 152 |\\n| [Dicklesworthstone/llama2\\\\_aided\\\\_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 152 |\\n| [c0sogi/LLMChat](https://github.com/c0sogi/LLMChat) | 152 |\\n| [eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 152 |\\n| [ErikBjare/gptme](https://github.com/ErikBjare/gptme) | 152 |\\n| [Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 152 |\\n| [RoboCoachTechnologies/ROScribe](https://github.com/RoboCoachTechnologies/ROScribe) | 151 |\\n| [Aggregate-Intellect/sherpa](https://github.com/Aggregate-Intellect/sherpa) | 151 |\\n| [3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 151 |\\n| 
[tangqiaoyu/ToolAlpaca](https://github.com/tangqiaoyu/ToolAlpaca) | 150 |\\n| [kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 150 |\\n| [mallahyari/drqa](https://github.com/mallahyari/drqa) | 150 |\\n| [MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 149 |\\n| [Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |\\n| [realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |\\n| [ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |\\n| [solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |\\n| [aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |\\n| [Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |\\n| [menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 146 |\\n| [trancethehuman/entities-extraction-web-scraper](https://github.com/trancethehuman/entities-extraction-web-scraper) | 144 |\\n| [peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 144 |\\n| [grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 144 |\\n| [gh18l/CrawlGPT](https://github.com/gh18l/CrawlGPT) | 142 |\\n| [langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 142 |\\n| [yasyf/summ](https://github.com/yasyf/summ) | 141 |\\n| [petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 141 |\\n| [hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |\\n| [jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 139 |\\n| [zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 139 |\\n| [jlonge4/local\\\\_llama](https://github.com/jlonge4/local_llama) | 139 |\\n| [smyja/blackmaria](https://github.com/smyja/blackmaria) | 138 |\\n| [ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 137 |\\n| [log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 137 |\\n| [davila7/file-gpt](https://github.com/davila7/file-gpt) | 137 |\\n| [dcaribou/transfermarkt-datasets](https://github.com/dcaribou/transfermarkt-datasets) | 136 |\\n| [ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 135 |\\n| [Undertone0809/promptulate](https://github.com/Undertone0809/promptulate) | 134 |\\n| [fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 134 |\\n| [run-llama/ai-engineer-workshop](https://github.com/run-llama/ai-engineer-workshop) | 133 |\\n| [definitive-io/code-indexer-loop](https://github.com/definitive-io/code-indexer-loop) | 131 |\\n| [mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 131 |\\n| [baidubce/bce-qianfan-sdk](https://github.com/baidubce/bce-qianfan-sdk) | 130 |\\n| [Ngonie-x/langchain\\\\_csv](https://github.com/Ngonie-x/langchain_csv) | 130 |\\n| [IvanIsCoding/ResuLLMe](https://github.com/IvanIsCoding/ResuLLMe) | 130 |\\n| [AnchoringAI/anchoring-ai](https://github.com/AnchoringAI/anchoring-ai) | 129 |\\n| [Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 128 |\\n| [athina-ai/athina-sdk](https://github.com/athina-ai/athina-sdk) | 126 |\\n| [thunlp/ChatEval](https://github.com/thunlp/ChatEval) | 126 |\\n| [prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 126 |\\n| [vietanhdev/pautobot](https://github.com/vietanhdev/pautobot) | 125 |\\n| 
[awslabs/generative-ai-cdk-constructs](https://github.com/awslabs/generative-ai-cdk-constructs) | 124 |\\n| [sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 124 |\\n| [rabbitmetrics/langchain-13-min](https://github.com/rabbitmetrics/langchain-13-min) | 124 |\\n| [AutoLLM/AutoAgents](https://github.com/AutoLLM/AutoAgents) | 122 |\\n| [nicknochnack/Nopenai](https://github.com/nicknochnack/Nopenai) | 122 |\\n| [wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 122 |\\n| [dotvignesh/PDFChat](https://github.com/dotvignesh/PDFChat) | 122 |\\n| [topoteretes/PromethAI-Backend](https://github.com/topoteretes/PromethAI-Backend) | 121 |\\n| [nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 121 |\\n| [vishwasg217/finsight](https://github.com/vishwasg217/finsight) | 120 |\\n| [snap-stanford/MLAgentBench](https://github.com/snap-stanford/MLAgentBench) | 120 |\\n| [Azure/app-service-linux-docs](https://github.com/Azure/app-service-linux-docs) | 120 |\\n| [nyanp/chat2plot](https://github.com/nyanp/chat2plot) | 120 |\\n| [ant4g0nist/polar](https://github.com/ant4g0nist/polar) | 119 |\\n| [aws-samples/cdk-eks-blueprints-patterns](https://github.com/aws-samples/cdk-eks-blueprints-patterns) | 119 |\\n| [aws-samples/amazon-kendra-langchain-extensions](https://github.com/aws-samples/amazon-kendra-langchain-extensions) | 119 |\\n| [Xueheng-Li/SynologyChatbotGPT](https://github.com/Xueheng-Li/SynologyChatbotGPT) | 119 |\\n| [CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 117 |\\n| [Lin-jun-xiang/docGPT-langchain](https://github.com/Lin-jun-xiang/docGPT-langchain) | 117 |\\n| [ademakdogan/ChatSQL](https://github.com/ademakdogan/ChatSQL) | 116 |\\n| [aniketmaurya/llm-inference](https://github.com/aniketmaurya/llm-inference) | 115 |\\n| [xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 115 |\\n| [cmooredev/RepoReader](https://github.com/cmooredev/RepoReader) | 115 |\\n| [abi/autocommit](https://github.com/abi/autocommit) | 115 |\\n| [MIDORIBIN/langchain-gpt4free](https://github.com/MIDORIBIN/langchain-gpt4free) | 114 |\\n| [finaldie/auto-news](https://github.com/finaldie/auto-news) | 114 |\\n| [Anil-matcha/Youtube-to-chatbot](https://github.com/Anil-matcha/Youtube-to-chatbot) | 114 |\\n| [avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot) | 114 |\\n| [Capsize-Games/airunner](https://github.com/Capsize-Games/airunner) | 113 |\\n| [atisharma/llama\\\\_farm](https://github.com/atisharma/llama_farm) | 113 |\\n| [mbchang/data-driven-characters](https://github.com/mbchang/data-driven-characters) | 112 |\\n| [fiddler-labs/fiddler-auditor](https://github.com/fiddler-labs/fiddler-auditor) | 112 |\\n| [dirkjbreeuwer/gpt-automated-web-scraper](https://github.com/dirkjbreeuwer/gpt-automated-web-scraper) | 111 |\\n| [Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding](https://github.com/Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding) | 111 |\\n| [hwchase17/langchain-gradio-template](https://github.com/hwchase17/langchain-gradio-template) | 111 |\\n| [artas728/spelltest](https://github.com/artas728/spelltest) | 110 |\\n| [NVIDIA/GenerativeAIExamples](https://github.com/NVIDIA/GenerativeAIExamples) | 109 |\\n| [Azure/aistudio-copilot-sample](https://github.com/Azure/aistudio-copilot-sample) | 108 |\\n| [codefuse-ai/codefuse-chatbot](https://github.com/codefuse-ai/codefuse-chatbot) | 108 |\\n| [apirrone/Memento](https://github.com/apirrone/Memento) | 108 |\\n| 
[e-johnstonn/GPT-Doc-Summarizer](https://github.com/e-johnstonn/GPT-Doc-Summarizer) | 108 |\\n| [salesforce/BOLAA](https://github.com/salesforce/BOLAA) | 107 |\\n| [Erol444/gpt4-openai-api](https://github.com/Erol444/gpt4-openai-api) | 106 |\\n| [linjungz/chat-with-your-doc](https://github.com/linjungz/chat-with-your-doc) | 106 |\\n| [crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 106 |\\n| [panaverse/learn-generative-ai](https://github.com/panaverse/learn-generative-ai) | 105 |\\n| [Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |\\n| [malywut/gpt\\\\_examples](https://github.com/malywut/gpt_examples) | 105 |\\n| [ritun16/chain-of-verification](https://github.com/ritun16/chain-of-verification) | 104 |\\n| [langchain-ai/langchain-benchmarks](https://github.com/langchain-ai/langchain-benchmarks) | 104 |\\n| [lightninglabs/LangChainBitcoin](https://github.com/lightninglabs/LangChainBitcoin) | 104 |\\n| [flepied/second-brain-agent](https://github.com/flepied/second-brain-agent) | 103 |\\n| [llmapp/openai.mini](https://github.com/llmapp/openai.mini) | 102 |\\n| [gimlet-ai/tddGPT](https://github.com/gimlet-ai/tddGPT) | 102 |\\n| [jlonge4/gpt\\\\_chatwithPDF](https://github.com/jlonge4/gpt_chatwithPDF) | 102 |\\n| [agentification/RAFA\\\\_code](https://github.com/agentification/RAFA_code) | 101 |\\n| [pacman100/DHS-LLM-Workshop](https://github.com/pacman100/DHS-LLM-Workshop) | 101 |\\n| [aws-samples/private-llm-qa-bot](https://github.com/aws-samples/private-llm-qa-bot) | 101 |\\n\\n_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)\\n_\\n\\n`github-dependents-info --repo \"langchain-ai/langchain\" --markdownfile dependents.md --minstars 100 --sort stars`\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/dependents.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/dependents/', 'pageStatusCode': 200}}, {'content': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. 
You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\n3rd Party Tutorials\\n===================\\n\\nTutorials[\\u200b](#tutorials \"Direct link to Tutorials\")\\n\\n----------------------------------------------------\\n\\n### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)\\n[\\u200b](#langchain-v-01-by-langchainai \"Direct link to langchain-v-01-by-langchainai\")\\n\\n### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)\\n[\\u200b](#build-with-langchain---advanced-by-langchainai \"Direct link to build-with-langchain---advanced-by-langchainai\")\\n\\n### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)\\n[\\u200b](#langgraph-by-langchainai \"Direct link to langgraph-by-langchainai\")\\n\\n### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)\\n[\\u200b](#by-greg-kamradt \"Direct link to by-greg-kamradt\")\\n\\n### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)\\n[\\u200b](#by-sam-witteveen \"Direct link to by-sam-witteveen\")\\n\\n### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)\\n[\\u200b](#by-james-briggs \"Direct link to by-james-briggs\")\\n\\n### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)\\n[\\u200b](#by-prompt-engineering \"Direct link to by-prompt-engineering\")\\n\\n### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)\\n[\\u200b](#by-mayo-oshin \"Direct link to by-mayo-oshin\")\\n\\n### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)\\n[\\u200b](#by-1-little-coder \"Direct link to by-1-little-coder\")\\n\\n### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)\\n[\\u200b](#by-boblin-chinese-language \"Direct link to 
by-boblin-chinese-language\")\\n\\nCourses[\\u200b](#courses \"Direct link to Courses\")\\n\\n----------------------------------------------\\n\\n### Featured courses on Deeplearning.AI[\\u200b](#featured-courses-on-deeplearningai \"Direct link to Featured courses on Deeplearning.AI\")\\n\\n* [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)\\n \\n* [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)\\n \\n* [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)\\n \\n* [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)\\n \\n\\n### Online courses[\\u200b](#online-courses \"Direct link to Online courses\")\\n\\n* [Udemy](https://www.udemy.com/courses/search/?q=langchain)\\n \\n* [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)\\n \\n* [Pluralsight](https://www.pluralsight.com/search?q=langchain)\\n \\n* [Coursera](https://www.coursera.org/search?query=langchain)\\n \\n* [Maven](https://maven.com/courses?query=langchain)\\n \\n* [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)\\n \\n* [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)\\n \\n* [edX](https://www.edx.org/search?q=langchain)\\n \\n* [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)\\n \\n\\nShort Tutorials[\\u200b](#short-tutorials \"Direct link to Short Tutorials\")\\n\\n----------------------------------------------------------------------\\n\\n* [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)\\n \\n* [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)\\n \\n* [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)\\n \\n* [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)\\n \\n\\nBooks and Handbooks[\\u200b](#books-and-handbooks \"Direct link to Books and Handbooks\")\\n\\n----------------------------------------------------------------------------------\\n\\n* [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1)\\n by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true)\\n , ©️ 2023 Packt Publishing\\n* [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/)\\n By **James Briggs** and **Francisco Ingham**\\n* [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde)\\n by **Ivan Reznikov**\\n* [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)\\n \\n\\n* * *\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/tutorials.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Tutorials](#tutorials)\\n * [LangChain v 0.1 by 
LangChain.ai](#langchain-v-01-by-langchainai)\\n \\n * [Build with Langchain - Advanced by LangChain.ai](#build-with-langchain---advanced-by-langchainai)\\n \\n * [LangGraph by LangChain.ai](#langgraph-by-langchainai)\\n \\n * [by Greg Kamradt](#by-greg-kamradt)\\n \\n * [by Sam Witteveen](#by-sam-witteveen)\\n \\n * [by James Briggs](#by-james-briggs)\\n \\n * [by Prompt Engineering](#by-prompt-engineering)\\n \\n * [by Mayo Oshin](#by-mayo-oshin)\\n \\n * [by 1 little Coder](#by-1-little-coder)\\n \\n * [by BobLin (Chinese language)](#by-boblin-chinese-language)\\n \\n* [Courses](#courses)\\n * [Featured courses on Deeplearning.AI](#featured-courses-on-deeplearningai)\\n \\n * [Online courses](#online-courses)\\n \\n* [Short Tutorials](#short-tutorials)\\n \\n* [Books and Handbooks](#books-and-handbooks)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\n3rd Party Tutorials\\n===================\\n\\nTutorials[\\u200b](#tutorials \"Direct link to Tutorials\")\\n\\n----------------------------------------------------\\n\\n### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)\\n[\\u200b](#langchain-v-01-by-langchainai \"Direct link to langchain-v-01-by-langchainai\")\\n\\n### [Build with Langchain - Advanced by 
LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)\\n[\\u200b](#build-with-langchain---advanced-by-langchainai \"Direct link to build-with-langchain---advanced-by-langchainai\")\\n\\n### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)\\n[\\u200b](#langgraph-by-langchainai \"Direct link to langgraph-by-langchainai\")\\n\\n### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)\\n[\\u200b](#by-greg-kamradt \"Direct link to by-greg-kamradt\")\\n\\n### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)\\n[\\u200b](#by-sam-witteveen \"Direct link to by-sam-witteveen\")\\n\\n### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)\\n[\\u200b](#by-james-briggs \"Direct link to by-james-briggs\")\\n\\n### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)\\n[\\u200b](#by-prompt-engineering \"Direct link to by-prompt-engineering\")\\n\\n### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)\\n[\\u200b](#by-mayo-oshin \"Direct link to by-mayo-oshin\")\\n\\n### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)\\n[\\u200b](#by-1-little-coder \"Direct link to by-1-little-coder\")\\n\\n### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)\\n[\\u200b](#by-boblin-chinese-language \"Direct link to by-boblin-chinese-language\")\\n\\nCourses[\\u200b](#courses \"Direct link to Courses\")\\n\\n----------------------------------------------\\n\\n### Featured courses on Deeplearning.AI[\\u200b](#featured-courses-on-deeplearningai \"Direct link to Featured courses on Deeplearning.AI\")\\n\\n* [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)\\n \\n* [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)\\n \\n* [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)\\n \\n* [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)\\n \\n\\n### Online courses[\\u200b](#online-courses \"Direct link to Online courses\")\\n\\n* [Udemy](https://www.udemy.com/courses/search/?q=langchain)\\n \\n* [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)\\n \\n* [Pluralsight](https://www.pluralsight.com/search?q=langchain)\\n \\n* [Coursera](https://www.coursera.org/search?query=langchain)\\n \\n* [Maven](https://maven.com/courses?query=langchain)\\n \\n* [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)\\n \\n* [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)\\n \\n* [edX](https://www.edx.org/search?q=langchain)\\n \\n* [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)\\n \\n\\nShort Tutorials[\\u200b](#short-tutorials \"Direct link to Short Tutorials\")\\n\\n----------------------------------------------------------------------\\n\\n* [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)\\n \\n* [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)\\n \\n* [by 
Rabbitmetrics](https://youtu.be/aywZrzNaKjs)\\n \\n* [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)\\n \\n\\nBooks and Handbooks[\\u200b](#books-and-handbooks \"Direct link to Books and Handbooks\")\\n\\n----------------------------------------------------------------------------------\\n\\n* [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1)\\n by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true)\\n , ©️ 2023 Packt Publishing\\n* [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/)\\n By **James Briggs** and **Francisco Ingham**\\n* [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde)\\n by **Ivan Reznikov**\\n* [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)\\n \\n\\n* * *\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/tutorials.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Tutorials](#tutorials)\\n * [LangChain v 0.1 by LangChain.ai](#langchain-v-01-by-langchainai)\\n \\n * [Build with Langchain - Advanced by LangChain.ai](#build-with-langchain---advanced-by-langchainai)\\n \\n * [LangGraph by LangChain.ai](#langgraph-by-langchainai)\\n \\n * [by Greg Kamradt](#by-greg-kamradt)\\n \\n * [by Sam Witteveen](#by-sam-witteveen)\\n \\n * [by James Briggs](#by-james-briggs)\\n \\n * [by Prompt Engineering](#by-prompt-engineering)\\n \\n * [by Mayo Oshin](#by-mayo-oshin)\\n \\n * [by 1 little Coder](#by-1-little-coder)\\n \\n * [by BobLin (Chinese language)](#by-boblin-chinese-language)\\n \\n* [Courses](#courses)\\n * [Featured courses on Deeplearning.AI](#featured-courses-on-deeplearningai)\\n \\n * [Online courses](#online-courses)\\n \\n* [Short Tutorials](#short-tutorials)\\n \\n* [Books and Handbooks](#books-and-handbooks)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/tutorials/', 'pageStatusCode': 200}}, {'content': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. 
You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\nYouTube videos\\n==============\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)\\n[\\u200b](#official-langchain-youtube-channel \"Direct link to official-langchain-youtube-channel\")\\n\\n### [Tutorials on YouTube](/v0.2/docs/additional_resources/tutorials/#tutorials)\\n[\\u200b](#tutorials-on-youtube \"Direct link to tutorials-on-youtube\")\\n\\nVideos (sorted by views)[\\u200b](#videos-sorted-by-views \"Direct link to Videos (sorted by views)\")\\n\\n-----------------------------------------------------------------------------------------------\\n\\nOnly videos with 40K+ views:\\n\\n* [Using `ChatGPT` with YOUR OWN Data. This is magical. 
(LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)\\n \\n* [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)\\n \\n* [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)\\n \\n* [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)\\n \\n* [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)\\n \\n* [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)\\n \\n* [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)\\n \\n* [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)\\n \\n* [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)\\n \\n* [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)\\n \\n* [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)\\n \\n* [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)\\n \\n* [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)\\n \\n* [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)\\n \\n* [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)\\n \\n* [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)\\n \\n* [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)\\n \\n* [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)\\n \\n* [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)\\n \\n* [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)\\n \\n* [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)\\n \\n* [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)\\n \\n* [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)\\n \\n* [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)\\n \\n* [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)\\n \\n* [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)\\n \\n* [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)\\n \\n* [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)\\n 
\\n* [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)\\n \\n* [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)\\n \\n* [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)\\n \\n* [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)\\n \\n* [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)\\n \\n* [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)\\n \\n* [Prompt Engineering And LLM\\'s With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)\\n \\n* [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)\\n \\n* [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)\\n \\n* [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)\\n \\n* [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)\\n \\n* [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)\\n \\n* [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)\\n \\n* [What\\'s next for AI agents ft. LangChain\\'s Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)\\n \\n* [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)\\n \\n* [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)\\n \\n* [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)\\n \\n* [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)\\n \\n* [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)\\n \\n* [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)\\n \\n\\n* * *\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/youtube.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Official LangChain YouTube channel](#official-langchain-youtube-channel)\\n \\n* [Tutorials on YouTube](#tutorials-on-youtube)\\n \\n* [Videos (sorted by views)](#videos-sorted-by-views)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* 
[Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\nYouTube videos\\n==============\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)\\n[\\u200b](#official-langchain-youtube-channel \"Direct link to official-langchain-youtube-channel\")\\n\\n### [Tutorials on YouTube](/v0.2/docs/additional_resources/tutorials/#tutorials)\\n[\\u200b](#tutorials-on-youtube \"Direct link to tutorials-on-youtube\")\\n\\nVideos (sorted by views)[\\u200b](#videos-sorted-by-views \"Direct link to Videos (sorted by views)\")\\n\\n-----------------------------------------------------------------------------------------------\\n\\nOnly videos with 40K+ views:\\n\\n* [Using `ChatGPT` with YOUR OWN Data. This is magical. 
(LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)\\n \\n* [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)\\n \\n* [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)\\n \\n* [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)\\n \\n* [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)\\n \\n* [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)\\n \\n* [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)\\n \\n* [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)\\n \\n* [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)\\n \\n* [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)\\n \\n* [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)\\n \\n* [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)\\n \\n* [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)\\n \\n* [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)\\n \\n* [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)\\n \\n* [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)\\n \\n* [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)\\n \\n* [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)\\n \\n* [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)\\n \\n* [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)\\n \\n* [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)\\n \\n* [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)\\n \\n* [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)\\n \\n* [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)\\n \\n* [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)\\n \\n* [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)\\n \\n* [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)\\n \\n* [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)\\n 
\\n* [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)\\n \\n* [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)\\n \\n* [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)\\n \\n* [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)\\n \\n* [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)\\n \\n* [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)\\n \\n* [Prompt Engineering And LLM\\'s With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)\\n \\n* [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)\\n \\n* [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)\\n \\n* [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)\\n \\n* [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)\\n \\n* [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)\\n \\n* [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)\\n \\n* [What\\'s next for AI agents ft. LangChain\\'s Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)\\n \\n* [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)\\n \\n* [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)\\n \\n* [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)\\n \\n* [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)\\n \\n* [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)\\n \\n* [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)\\n \\n\\n* * *\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/youtube.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Official LangChain YouTube channel](#official-langchain-youtube-channel)\\n \\n* [Tutorials on YouTube](#tutorials-on-youtube)\\n \\n* [Videos (sorted by views)](#videos-sorted-by-views)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* 
[Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/youtube/', 'pageStatusCode': 200}}]\n"
]
}
],
"source": [
"# Initialize the FirecrawlApp with your API key\n",
"app = FirecrawlApp(api_key=firecrawl_api_key)\n",
"\n",
"# Crawl a website\n",
"crawl_url = 'https://python.langchain.com/v0.2/docs'\n",
|
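"# 'limit' keeps the crawl small (5 pages here) so the demo stays fast and cheap\n",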
"params = {\n",
"    'crawlerOptions': {\n",
"        'limit': 5\n",
"    }\n",
"}\n",
"crawl_result = app.crawl_url(crawl_url, params=params)\n",
"cleaned_crawl_result = []\n",
"if crawl_result is not None:\n",
"    # Convert crawl results to JSON format, excluding 'content' field from each entry\n",
"    cleaned_crawl_result = [{k: v for k, v in entry.items() if k != 'content'} for entry in crawl_result]\n",
"else:\n",
"    print(\"No data returned from crawl.\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"MODEL_NAME = \"claude-3-5-sonnet-20240620\"\n",
"\n",
"SYSTEM_PROMPT = \"\"\"\n",
"## your job & context\n",
"you are a python data scientist. you are given tasks to complete and you run python code to solve them.\n",
"- the python code runs in jupyter notebook.\n",
"- every time you call the `execute_python` tool, the python code is executed in a separate cell. it's okay to make multiple calls to `execute_python`.\n",
"- display visualizations using matplotlib or any other visualization library directly in the notebook. don't worry about saving the visualizations to a file.\n",
"- you have access to the internet and can make api requests.\n",
"- you also have access to the filesystem and can read/write files.\n",
"- you can install any pip package (if it exists) if you need to but the usual packages for data analysis are already preinstalled.\n",
"- you can run any python code you want, everything is running in a secure sandbox environment.\n",
"\n",
"## style guide\n",
"tool response values that have text inside \"[]\" mean that a visual element got rendered in the notebook. for example:\n",
"- \"[chart]\" means that a chart was generated in the notebook.\n",
"\"\"\"\n",
"\n",
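"# Tool definition shown to Claude: the model calls execute_python to run code in the E2B sandbox\n",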
"tools = [\n",
"    {\n",
"        \"name\": \"execute_python\",\n",
"        \"description\": \"Execute python code in a Jupyter notebook cell and return any result, stdout, stderr, display_data, and error.\",\n",
"        \"input_schema\": {\n",
"            \"type\": \"object\",\n",
"            \"properties\": {\n",
"                \"code\": {\n",
"                    \"type\": \"string\",\n",
"                    \"description\": \"The python code to execute in a single cell.\"\n",
"                }\n",
"            },\n",
"            \"required\": [\"code\"]\n",
"        }\n",
"    }\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
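"# Run the code Claude produces inside the E2B sandbox, streaming stdout/stderr as it executes\n",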
"def code_interpret(e2b_code_interpreter, code):\n",
"    print(\"Running code interpreter...\")\n",
"    execution = e2b_code_interpreter.notebook.exec_cell(\n",
"        code,\n",
"        on_stderr=lambda stderr: print(\"[Code Interpreter]\", stderr),\n",
"        on_stdout=lambda stdout: print(\"[Code Interpreter]\", stdout),\n",
"        # You can also stream code execution results\n",
"        # on_result=...\n",
"    )\n",
"\n",
"    if execution.error:\n",
"        print(\"[Code Interpreter ERROR]\", execution.error)\n",
"    else:\n",
"        return execution.results"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from anthropic import Anthropic\n",
"client = Anthropic(\n",
"    api_key=anthropic_api_key,\n",
")\n",
"\n",
"def process_tool_call(e2b_code_interpreter, tool_name, tool_input):\n",
"    if tool_name == \"execute_python\":\n",
"        return code_interpret(e2b_code_interpreter, tool_input[\"code\"])\n",
"    return []\n",
"\n",
"def chat_with_claude(e2b_code_interpreter, user_message):\n",
"    print(f\"\\n{'='*50}\\nUser Message: {user_message}\\n{'='*50}\")\n",
"\n",
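"    # Send the user message to Claude along with the execute_python tool definition\n",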
"    message = client.messages.create(\n",
"        model=MODEL_NAME,\n",
"        system=SYSTEM_PROMPT,\n",
"        messages=[{\"role\": \"user\", \"content\": user_message}],\n",
"        max_tokens=4096,\n",
"        tools=tools,\n",
"    )\n",
"\n",
"    print(\"\\nInitial Response:\")\n",
"    print(f\"Stop Reason: {message.stop_reason}\")\n",
"    print(f\"Content: {message.content}\")\n",
"\n",
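"    # When Claude decides to call the tool, run the generated code in the sandbox and return its results\n",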
"    if message.stop_reason == \"tool_use\":\n",
"        tool_use = next(block for block in message.content if block.type == \"tool_use\")\n",
"        tool_name = tool_use.name\n",
"        tool_input = tool_use.input\n",
"\n",
"        print(f\"\\nTool Used: {tool_name}\")\n",
"        print(f\"Tool Input: {tool_input}\")\n",
"\n",
"        code_interpreter_results = process_tool_call(e2b_code_interpreter, tool_name, tool_input)\n",
"\n",
"        print(f\"Tool Result: {code_interpreter_results}\")\n",
"        return code_interpreter_results\n"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"==================================================\n",
"User Message: Use python to identify the most common topics in the crawl results. For each topic, count the number of times it appears in the crawl results and plot them. Here is the crawl results: [{'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\narXiv\\n=====\\n\\nLangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, Templates, and Cookbooks.\\n\\nFrom the opposite direction, scientists use LangChain in research and reference LangChain in the research papers. Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header)\\n.\\n\\nSummary[\\u200b](#summary \"Direct link to Summary\")\\n\\n----------------------------------------------\\n\\n| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation |\\n| --- | --- | --- | --- |\\n| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) |\\n| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) |\\n| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) |\\n| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. 
Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) |\\n| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) |\\n| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) |\\n| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) |\\n| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)<br>, `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) |\\n| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) |\\n| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)<br>, `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) |\\n| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)<br>, `Cookbook:` [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) |\\n| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) |\\n| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. 
| 2023-04-17 | `Cookbook:` [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)<br>, [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) |\\n| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)<br>, [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) |\\n| `2303.17760v2` [CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) |\\n| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)<br>, `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) |\\n| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) |\\n| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. 
| 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)<br>, `Template:` [hyde](https://python.langchain.com/docs/templates/hyde)<br>, `Cookbook:` [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) |\\n| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) |\\n| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) |\\n| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)<br>, [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)<br>, `Cookbook:` [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) |\\n| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)<br>, [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)<br>, [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)<br>, `API:` [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)<br>, [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) |\\n| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. 
| 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) |\\n| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) |\\n| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)<br>, [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) |\\n| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) |\\n| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. 
| 2019-09-11 | `API:` [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)<br>, [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference) |\\n| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) |\\n\\nSelf-Discover: Large Language Models Self-Compose Reasoning Structures[\\u200b](#self-discover-large-language-models-self-compose-reasoning-structures \"Direct link to Self-Discover: Large Language Models Self-Compose Reasoning Structures\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2402.03620v1\\n \\n* **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures\\n \\n* **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.\\n \\n* **Published Date:** 2024-02-06\\n \\n* **URL:** [http://arxiv.org/abs/2402.03620v1](http://arxiv.org/abs/2402.03620v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)\\n \\n\\n**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2\\'s performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x fewer inference compute. 
Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.\\n\\nRAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval[\\u200b](#raptor-recursive-abstractive-processing-for-tree-organized-retrieval \"Direct link to RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.18059v1\\n \\n* **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval\\n \\n* **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.\\n \\n* **Published Date:** 2024-01-31\\n \\n* **URL:** [http://arxiv.org/abs/2401.18059v1](http://arxiv.org/abs/2401.18059v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)\\n \\n\\n**Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy.\\n\\nCorrective Retrieval Augmented Generation[\\u200b](#corrective-retrieval-augmented-generation \"Direct link to Corrective Retrieval Augmented Generation\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.15884v2\\n \\n* **Title:** Corrective Retrieval Augmented Generation\\n \\n* **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.\\n \\n* **Published Date:** 2024-01-29\\n \\n* **URL:** [http://arxiv.org/abs/2401.15884v2](http://arxiv.org/abs/2401.15884v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. 
Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.\\n\\nMixtral of Experts[\\u200b](#mixtral-of-experts \"Direct link to Mixtral of Experts\")\\n\\n-------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2401.04088v1\\n \\n* **Title:** Mixtral of Experts\\n \\n* **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.\\n \\n* **Published Date:** 2024-01-08\\n \\n* **URL:** [http://arxiv.org/abs/2401.04088v1](http://arxiv.org/abs/2401.04088v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [together\\\\_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)\\n \\n\\n**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.\\n\\nDense X Retrieval: What Retrieval Granularity Should We Use?[\\u200b](#dense-x-retrieval-what-retrieval-granularity-should-we-use \"Direct link to Dense X Retrieval: What Retrieval Granularity Should We Use?\")\\n\\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2312.06648v2\\n \\n* **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?\\n \\n* **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.\\n \\n* **Published Date:** 2023-12-11\\n \\n* **URL:** [http://arxiv.org/abs/2312.06648v2](http://arxiv.org/abs/2312.06648v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)\\n \\n\\n**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. 
When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularity. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.\\n\\nChain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models[\\u200b](#chain-of-note-enhancing-robustness-in-retrieval-augmented-language-models \"Direct link to Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2311.09210v1\\n \\n* **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models\\n \\n* **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.\\n \\n* **Published Date:** 2023-11-15\\n \\n* **URL:** [http://arxiv.org/abs/2311.09210v1](http://arxiv.org/abs/2311.09210v1)\\n \\n* **LangChain:**\\n \\n * **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)\\n \\n\\n**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, and potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with \"unknown\" when the answer is unattainable. In response to these challenges, we introduces Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently trained on an LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. 
Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.\\n\\nSelf-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection[\\u200b](#self-rag-learning-to-retrieve-generate-and-critique-through-self-reflection \"Direct link to Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.11511v1\\n \\n* **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection\\n \\n* **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.\\n \\n* **Published Date:** 2023-10-17\\n \\n* **URL:** [http://arxiv.org/abs/2310.11511v1](http://arxiv.org/abs/2310.11511v1)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [langgraph\\\\_self\\\\_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)\\n \\n\\n**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM\\'s quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. 
Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models.\\n\\nTake a Step Back: Evoking Reasoning via Abstraction in Large Language Models[\\u200b](#take-a-step-back-evoking-reasoning-via-abstraction-in-large-language-models \"Direct link to Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2310.06117v2\\n \\n* **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models\\n \\n* **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.\\n \\n* **Published Date:** 2023-10-09\\n \\n* **URL:** [http://arxiv.org/abs/2310.06117v2](http://arxiv.org/abs/2310.06117v2)\\n \\n* **LangChain:**\\n \\n * **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)\\n \\n * **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)\\n \\n\\n**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.\\n\\nLlama 2: Open Foundation and Fine-Tuned Chat Models[\\u200b](#llama-2-open-foundation-and-fine-tuned-chat-models \"Direct link to Llama 2: Open Foundation and Fine-Tuned Chat Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2307.09288v2\\n \\n* **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models\\n \\n* **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.\\n \\n* **Published Date:** 2023-07-18\\n \\n* **URL:** [http://arxiv.org/abs/2307.09288v2](http://arxiv.org/abs/2307.09288v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_Structured\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)\\n \\n\\n**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.\\n\\nQuery Rewriting for Retrieval-Augmented Large Language Models[\\u200b](#query-rewriting-for-retrieval-augmented-large-language-models \"Direct link to Query Rewriting for Retrieval-Augmented Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.14283v3\\n \\n* **Title:** Query Rewriting for Retrieval-Augmented Large Language Models\\n \\n* **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.\\n \\n* **Published Date:** 2023-05-23\\n \\n* **URL:** [http://arxiv.org/abs/2305.14283v3](http://arxiv.org/abs/2305.14283v3)\\n \\n* **LangChain:**\\n \\n * **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)\\n \\n * **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)\\n \\n\\n**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of the previous retrieve-then-read for the retrieval-augmented LLMs from the perspective of the query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. 
Experiments results show consistent performance improvement, indicating that our framework is proven effective and scalable, and brings a new framework for retrieval-augmented LLM.\\n\\nLarge Language Model Guided Tree-of-Thought[\\u200b](#large-language-model-guided-tree-of-thought \"Direct link to Large Language Model Guided Tree-of-Thought\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.08291v1\\n \\n* **Title:** Large Language Model Guided Tree-of-Thought\\n \\n* **Authors:** Jieyi Long\\n \\n* **Published Date:** 2023-05-15\\n \\n* **URL:** [http://arxiv.org/abs/2305.08291v1](http://arxiv.org/abs/2305.08291v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)\\n \\n * **Cookbook:** [tree\\\\_of\\\\_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)\\n \\n\\n**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind\\'s approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. 
Our implementation of the ToT-based Sudoku solver is available on GitHub: \\\\\\\\url{[https://github.com/jieyilong/tree-of-thought-puzzle-solver}](https://github.com/jieyilong/tree-of-thought-puzzle-solver%7D)\\n.\\n\\nPlan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models[\\u200b](#plan-and-solve-prompting-improving-zero-shot-chain-of-thought-reasoning-by-large-language-models \"Direct link to Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2305.04091v3\\n \\n* **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models\\n \\n* **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.\\n \\n* **Published Date:** 2023-05-06\\n \\n* **URL:** [http://arxiv.org/abs/2305.04091v3](http://arxiv.org/abs/2305.04091v3)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [plan\\\\_and\\\\_execute\\\\_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with \"Let\\'s think step by step\" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. 
The code can be found at [https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting](https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting)\\n.\\n\\nVisual Instruction Tuning[\\u200b](#visual-instruction-tuning \"Direct link to Visual Instruction Tuning\")\\n\\n----------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.08485v2\\n \\n* **Title:** Visual Instruction Tuning\\n \\n* **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.\\n \\n* **Published Date:** 2023-04-17\\n \\n* **URL:** [http://arxiv.org/abs/2304.08485v2](http://arxiv.org/abs/2304.08485v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [Semi\\\\_structured\\\\_and\\\\_multi\\\\_modal\\\\_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)\\n , [Semi\\\\_structured\\\\_multi\\\\_modal\\\\_RAG\\\\_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)\\n \\n\\n**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.\\n\\nGenerative Agents: Interactive Simulacra of Human Behavior[\\u200b](#generative-agents-interactive-simulacra-of-human-behavior \"Direct link to Generative Agents: Interactive Simulacra of Human Behavior\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2304.03442v2\\n \\n* **Title:** Generative Agents: Interactive Simulacra of Human Behavior\\n \\n* **Authors:** Joon Sung Park, Joseph C. O\\'Brien, Carrie J. Cai, et al.\\n \\n* **Published Date:** 2023-04-07\\n \\n* **URL:** [http://arxiv.org/abs/2304.03442v2](http://arxiv.org/abs/2304.03442v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [multiagent\\\\_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)\\n , [generative\\\\_agents\\\\_interactive\\\\_simulacra\\\\_of\\\\_human\\\\_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)\\n \\n\\n**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. 
In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent\\'s experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine\\'s Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.\\n\\nCAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society[\\u200b](#camel-communicative-agents-for-mind-exploration-of-large-language-model-society \"Direct link to CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17760v2\\n \\n* **Title:** CAMEL: Communicative Agents for \"Mind\" Exploration of Large Language Model Society\\n \\n* **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.\\n \\n* **Published Date:** 2023-03-31\\n \\n* **URL:** [http://arxiv.org/abs/2303.17760v2](http://arxiv.org/abs/2303.17760v2)\\n \\n* **LangChain:**\\n \\n * **Cookbook:** [camel\\\\_role\\\\_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)\\n \\n\\n**Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. 
We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel)\\n.\\n\\nHuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face[\\u200b](#hugginggpt-solving-ai-tasks-with-chatgpt-and-its-friends-in-hugging-face \"Direct link to HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\")\\n\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.17580v4\\n \\n* **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face\\n \\n* **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.\\n \\n* **Published Date:** 2023-03-30\\n \\n* **URL:** [http://arxiv.org/abs/2303.17580v4](http://arxiv.org/abs/2303.17580v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.autonomous\\\\_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)\\n \\n * **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)\\n \\n\\n**Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. 
By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.\\n\\nGPT-4 Technical Report[\\u200b](#gpt-4-technical-report \"Direct link to GPT-4 Technical Report\")\\n\\n-------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2303.08774v6\\n \\n* **Title:** GPT-4 Technical Report\\n \\n* **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.\\n \\n* **Published Date:** 2023-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2303.08774v6](http://arxiv.org/abs/2303.08774v6)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/vectorstores/mongodb\\\\_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)\\n \\n\\n**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4\\'s performance based on models trained with no more than 1/1,000th the compute of GPT-4.\\n\\nA Watermark for Large Language Models[\\u200b](#a-watermark-for-large-language-models \"Direct link to A Watermark for Large Language Models\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2301.10226v4\\n \\n* **Title:** A Watermark for Large Language Models\\n \\n* **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.\\n \\n* **Published Date:** 2023-01-24\\n \\n* **URL:** [http://arxiv.org/abs/2301.10226v4](http://arxiv.org/abs/2301.10226v4)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)\\n , 
[langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of \"green\" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.\\n\\nPrecise Zero-Shot Dense Retrieval without Relevance Labels[\\u200b](#precise-zero-shot-dense-retrieval-without-relevance-labels \"Direct link to Precise Zero-Shot Dense Retrieval without Relevance Labels\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.10496v1\\n \\n* **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels\\n \\n* **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.\\n \\n* **Published Date:** 2022-12-20\\n \\n* **URL:** [http://arxiv.org/abs/2212.10496v1](http://arxiv.org/abs/2212.10496v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)\\n \\n * **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)\\n \\n * **Cookbook:** [hypothetical\\\\_document\\\\_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)\\n \\n\\n**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder~(e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step ground the generated document to the actual corpus, with the encoder\\'s dense bottleneck filtering out the incorrect details. 
Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages~(e.g. sw, ko, ja).\\n\\nRobust and Explainable Identification of Logical Fallacies in Natural Language Arguments[\\u200b](#robust-and-explainable-identification-of-logical-fallacies-in-natural-language-arguments \"Direct link to Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2212.07425v3\\n \\n* **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments\\n \\n* **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.\\n \\n* **Published Date:** 2022-12-12\\n \\n* **URL:** [http://arxiv.org/abs/2212.07425v3](http://arxiv.org/abs/2212.07425v3)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.fallacy\\\\_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)\\n \\n\\n**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. 
We share our open-source code and data on GitHub to support further work on logical fallacy identification.\\n\\nComplementary Explanations for Effective In-Context Learning[\\u200b](#complementary-explanations-for-effective-in-context-learning \"Direct link to Complementary Explanations for Effective In-Context Learning\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.13892v2\\n \\n* **Title:** Complementary Explanations for Effective In-Context Learning\\n \\n* **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.\\n \\n* **Published Date:** 2022-11-25\\n \\n* **URL:** [http://arxiv.org/abs/2211.13892v2](http://arxiv.org/abs/2211.13892v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)\\n \\n\\n**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. 
Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.\\n\\nPAL: Program-aided Language Models[\\u200b](#pal-program-aided-language-models \"Direct link to PAL: Program-aided Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2211.10435v2\\n \\n* **Title:** PAL: Program-aided Language Models\\n \\n* **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.\\n \\n* **Published Date:** 2022-11-18\\n \\n* **URL:** [http://arxiv.org/abs/2211.10435v2](http://arxiv.org/abs/2211.10435v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)\\n , [langchain\\\\_experimental.pal\\\\_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)\\n \\n * **Cookbook:** [program\\\\_aided\\\\_language\\\\_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)\\n \\n\\n**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time (\"few-shot prompting\"). Much of this success can be attributed to prompting methods such as \"chain-of-thought\\'\\', which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. 
Our code and data are publicly available at [http://reasonwithpal.com/](http://reasonwithpal.com/)\\n .\\n\\nReAct: Synergizing Reasoning and Acting in Language Models[\\u200b](#react-synergizing-reasoning-and-acting-in-language-models \"Direct link to ReAct: Synergizing Reasoning and Acting in Language Models\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2210.03629v3\\n \\n* **Title:** ReAct: Synergizing Reasoning and Acting in Language Models\\n \\n* **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.\\n \\n* **Published Date:** 2022-10-06\\n \\n* **URL:** [http://arxiv.org/abs/2210.03629v3](http://arxiv.org/abs/2210.03629v3)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere)\\n , [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface)\\n , [docs/integrations/tools/ionic\\\\_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)\\n \\n * **API Reference:** [langchain...create\\\\_react\\\\_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)\\n , [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)\\n \\n\\n**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
Project site with code: [https://react-lm.github.io](https://react-lm.github.io)\\n\\nDeep Lake: a Lakehouse for Deep Learning[\\u200b](#deep-lake-a-lakehouse-for-deep-learning \"Direct link to Deep Lake: a Lakehouse for Deep Learning\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2209.10785v2\\n \\n* **Title:** Deep Lake: a Lakehouse for Deep Learning\\n \\n* **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.\\n \\n* **Published Date:** 2022-09-22\\n \\n* **URL:** [http://arxiv.org/abs/2209.10785v2](http://arxiv.org/abs/2209.10785v2)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/providers/activeloop\\\\_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)\\n \\n\\n**Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.\\n\\nBitext Mining Using Distilled Sentence Representations for Low-Resource Languages[\\u200b](#bitext-mining-using-distilled-sentence-representations-for-low-resource-languages \"Direct link to Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2205.12654v1\\n \\n* **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\\n \\n* **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk\\n \\n* **Published Date:** 2022-05-25\\n \\n* **URL:** [http://arxiv.org/abs/2205.12654v1](http://arxiv.org/abs/2205.12654v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)\\n \\n\\n**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. 
A promising approach has been to train one-for-all multilingual models capable of cross-lingual transfer, but these models often suffer from insufficient capacity and interference between unrelated languages. Instead, we move away from this approach and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. To achieve this, we focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We introduce a new teacher-student training scheme which combines supervised and self-supervised training, allowing encoders to take advantage of monolingual training data, which is valuable in the low-resource setting. Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 50 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders, mine bitexts, and validate the bitexts by training NMT systems.\\n\\nEvaluating the Text-to-SQL Capabilities of Large Language Models[\\u200b](#evaluating-the-text-to-sql-capabilities-of-large-language-models \"Direct link to Evaluating the Text-to-SQL Capabilities of Large Language Models\")\\n\\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2204.00498v1\\n \\n* **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models\\n \\n* **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau\\n \\n* **Published Date:** 2022-03-15\\n \\n* **URL:** [http://arxiv.org/abs/2204.00498v1](http://arxiv.org/abs/2204.00498v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)\\n , [langchain\\\\_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)\\n \\n\\n**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong baseline on the Spider benchmark; we also analyze the failure modes of Codex in this setting. 
Furthermore, we demonstrate on the GeoQuery and Scholar benchmarks that a small number of in-domain examples provided in the prompt enables Codex to perform better than state-of-the-art models finetuned on such few-shot examples.\\n\\nLocally Typical Sampling[\\u200b](#locally-typical-sampling \"Direct link to Locally Typical Sampling\")\\n\\n-------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2202.00666v5\\n \\n* **Title:** Locally Typical Sampling\\n \\n* **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.\\n \\n* **Published Date:** 2022-02-01\\n \\n* **URL:** [http://arxiv.org/abs/2202.00666v5](http://arxiv.org/abs/2202.00666v5)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Today\\'s probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models perform well under standard metrics, e.g., perplexity. This discrepancy has puzzled the language generation community for the last few years. In this work, we posit that the abstraction of natural language generation as a discrete stochastic process--which allows for an information-theoretic analysis--can provide new insights into the behavior of probabilistic language generators, e.g., why high-probability texts can be dull or repetitive. Humans use language as a means of communicating information, aiming to do so in a simultaneously efficient and error-minimizing manner; in fact, psycholinguistics research suggests humans choose each word in a string with this subconscious goal in mind. We formally define the set of strings that meet this criterion: those for which each word has an information content close to the expected information content, i.e., the conditional entropy of our model. We then propose a simple and efficient procedure for enforcing this criterion when generating from probabilistic models, which we call locally typical sampling. 
Automatic and human evaluations show that, in comparison to nucleus and top-k sampling, locally typical sampling offers competitive performance (in both abstractive summarization and story generation) in terms of quality while consistently reducing degenerate repetitions.\\n\\nLearning Transferable Visual Models From Natural Language Supervision[\\u200b](#learning-transferable-visual-models-from-natural-language-supervision \"Direct link to Learning Transferable Visual Models From Natural Language Supervision\")\\n\\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 2103.00020v1\\n \\n* **Title:** Learning Transferable Visual Models From Natural Language Supervision\\n \\n* **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.\\n \\n* **Published Date:** 2021-02-26\\n \\n* **URL:** [http://arxiv.org/abs/2103.00020v1](http://arxiv.org/abs/2103.00020v1)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_experimental.open\\\\_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)\\n \\n\\n**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at [https://github.com/OpenAI/CLIP](https://github.com/OpenAI/CLIP)\\n.\\n\\nCTRL: A Conditional Transformer Language Model for Controllable Generation[\\u200b](#ctrl-a-conditional-transformer-language-model-for-controllable-generation \"Direct link to CTRL: A Conditional Transformer Language Model for Controllable Generation\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1909.05858v2\\n \\n* **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation\\n \\n* **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. 
Varshney, et al.\\n \\n* **Published Date:** 2019-09-11\\n \\n* **URL:** [http://arxiv.org/abs/1909.05858v2](http://arxiv.org/abs/1909.05858v2)\\n \\n* **LangChain:**\\n \\n * **API Reference:** [langchain\\\\_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)\\n , [langchain\\\\_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)\\n \\n\\n**Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution. We have released multiple full-sized, pretrained versions of CTRL at [https://github.com/salesforce/ctrl](https://github.com/salesforce/ctrl)\\n.\\n\\nSentence-BERT: Sentence Embeddings using Siamese BERT-Networks[\\u200b](#sentence-bert-sentence-embeddings-using-siamese-bert-networks \"Direct link to Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\")\\n\\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\\n* **arXiv id:** 1908.10084v1\\n \\n* **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\\n \\n* **Authors:** Nils Reimers, Iryna Gurevych\\n \\n* **Published Date:** 2019-08-27\\n \\n* **URL:** [http://arxiv.org/abs/1908.10084v1](http://arxiv.org/abs/1908.10084v1)\\n \\n* **LangChain:**\\n \\n * **Documentation:** [docs/integrations/text\\\\_embedding/sentence\\\\_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)\\n \\n\\n**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.
', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/arxiv_references/', 'pageStatusCode': 200}}, {'markdown': 'Dependents\\n==========\\n\\nDependents stats for `langchain-ai/langchain`\\n\\n[![](https://img.shields.io/static/v1?label=Used%20by&message=41717&color=informational&logo=slickpic)](https://github.com/langchain-ai/langchain/network/dependents)\\n\\n\\[update: `2023-12-08`; only dependent repositories with Stars > 100\\]\\n\\n| Repository | Stars |\\n| --- | --- |\\n| [AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 46514 |\\n| [imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 44439 |\\n| [LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 35906 |\\n| [hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 35528 |\\n| [moymix/TaskMatrix](https://github.com/moymix/TaskMatrix) | 34342 |\\n| [geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 31126 |\\n| [streamlit/streamlit](https://github.com/streamlit/streamlit) | 28911 |\\n| [reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 27833 |\\n| [StanGirard/quivr](https://github.com/StanGirard/quivr) | 26032 |\\n
| [OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 24946 |\\n| [run-llama/llama\\_index](https://github.com/run-llama/llama_index) | 24859 |\\n| [jmorganca/ollama](https://github.com/jmorganca/ollama) | 20849 |\\n| [openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 20249 |\\n| [chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 19305 |\\n| [mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 19172 |\\n| [PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 17528 |\\n| [cube-js/cube](https://github.com/cube-js/cube) | 16575 |\\n| [mlflow/mlflow](https://github.com/mlflow/mlflow) | 16000 |\\n| 
[mudler/LocalAI](https://github.com/mudler/LocalAI) | 14067 |\\n| [logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 13679 |\\n| [GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 13648 |\\n| [arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 13423 |\\n| [openai/evals](https://github.com/openai/evals) | 12649 |\\n| [airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 12460 |\\n| [langgenius/dify](https://github.com/langgenius/dify) | 11859 |\\n| [databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10672 |\\n| [AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9437 |\\n| [langchain-ai/langchainjs](https://github.com/langchain-ai/langchainjs) | 9227 |\\n| [gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 9203 |\\n| [aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples) | 9079 |\\n| [h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 8945 |\\n| [PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 7550 |\\n| [bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 6957 |\\n| [THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6801 |\\n| [microsoft/promptflow](https://github.com/microsoft/promptflow) | 6776 |\\n| [cpacker/MemGPT](https://github.com/cpacker/MemGPT) | 6642 |\\n| [joshpxyne/gpt-migrate](https://github.com/joshpxyne/gpt-migrate) | 6482 |\\n| [zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 6037 |\\n| [embedchain/embedchain](https://github.com/embedchain/embedchain) | 6023 |\\n| [mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 6019 |\\n| [assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 5936 |\\n| [sweepai/sweep](https://github.com/sweepai/sweep) | 5855 |\\n| [wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5766 |\\n| [zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 5710 |\\n| [pdm-project/pdm](https://github.com/pdm-project/pdm) | 5665 |\\n| [GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 5568 |\\n| [gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 5507 |\\n| [Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 5501 |\\n| [facebookresearch/llama-recipes](https://github.com/facebookresearch/llama-recipes) | 5477 |\\n| [serge-chat/serge](https://github.com/serge-chat/serge) | 5221 |\\n| [run-llama/rags](https://github.com/run-llama/rags) | 4916 |\\n| [openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4870 |\\n| [danswer-ai/danswer](https://github.com/danswer-ai/danswer) | 4774 |\\n| [langchain-ai/opengpts](https://github.com/langchain-ai/opengpts) | 4709 |\\n| [postgresml/postgresml](https://github.com/postgresml/postgresml) | 4639 |\\n| [MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 4582 |\\n| [intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4581 |\\n| [yihong0618/xiaogpt](https://github.com/yihong0618/xiaogpt) | 4359 |\\n| [RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 4357 |\\n| [Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 4317 |\\n| [madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4289 |\\n| [apache/nifi](https://github.com/apache/nifi) | 4098 |\\n| [langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 4091 |\\n| [aiwaves-cn/agents](https://github.com/aiwaves-cn/agents) | 4073 |\\n| 
[krishnaik06/The-Grand-Complete-Data-Science-Materials](https://github.com/krishnaik06/The-Grand-Complete-Data-Science-Materials) | 4065 |\\n| [khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 4016 |\\n| [Azure/azure-sdk-for-python](https://github.com/Azure/azure-sdk-for-python) | 3941 |\\n| [PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3915 |\\n| [OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 3799 |\\n| [marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3771 |\\n| [kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3688 |\\n| [Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 3543 |\\n| [llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3515 |\\n| [shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 3425 |\\n| [openchatai/OpenCopilot](https://github.com/openchatai/OpenCopilot) | 3418 |\\n| [josStorer/RWKV-Runner](https://github.com/josStorer/RWKV-Runner) | 3297 |\\n| [whitead/paper-qa](https://github.com/whitead/paper-qa) | 3280 |\\n| [homanp/superagent](https://github.com/homanp/superagent) | 3258 |\\n| [ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 3199 |\\n| [OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 3099 |\\n| [project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 3090 |\\n| [OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2989 |\\n| [xlang-ai/OpenAgents](https://github.com/xlang-ai/OpenAgents) | 2825 |\\n| [dataelement/bisheng](https://github.com/dataelement/bisheng) | 2797 |\\n| [Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2784 |\\n| [OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2734 |\\n| [run-llama/llama-hub](https://github.com/run-llama/llama-hub) | 2721 |\\n| [SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2647 |\\n| [NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 2637 |\\n| [X-D-Lab/LangChain-ChatGLM-Webui](https://github.com/X-D-Lab/LangChain-ChatGLM-Webui) | 2532 |\\n| [GerevAI/gerev](https://github.com/GerevAI/gerev) | 2517 |\\n| [keephq/keep](https://github.com/keephq/keep) | 2448 |\\n| [yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2397 |\\n| [OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2324 |\\n| [IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2241 |\\n| [YiVal/YiVal](https://github.com/YiVal/YiVal) | 2232 |\\n| [jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 2189 |\\n| [Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 2136 |\\n| [microsoft/TaskWeaver](https://github.com/microsoft/TaskWeaver) | 2126 |\\n| [hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 2083 |\\n| [FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 2053 |\\n| [paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1999 |\\n| [hegelai/prompttools](https://github.com/hegelai/prompttools) | 1984 |\\n| [mckinsey/vizro](https://github.com/mckinsey/vizro) | 1951 |\\n| [vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1868 |\\n| [dot-agent/openAMS](https://github.com/dot-agent/openAMS) | 1796 |\\n| [explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 1766 |\\n| [AI-Citizen/SolidGPT](https://github.com/AI-Citizen/SolidGPT) | 1761 |\\n| 
[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1696 |\\n| [run-llama/sec-insights](https://github.com/run-llama/sec-insights) | 1654 |\\n| [avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1635 |\\n| [microsoft/WhatTheHack](https://github.com/microsoft/WhatTheHack) | 1629 |\\n| [noahshinn/reflexion](https://github.com/noahshinn/reflexion) | 1625 |\\n| [psychic-api/psychic](https://github.com/psychic-api/psychic) | 1618 |\\n| [Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1611 |\\n| [pinterest/querybook](https://github.com/pinterest/querybook) | 1586 |\\n| [refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1553 |\\n| [jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1537 |\\n| [jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1522 |\\n| [agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1493 |\\n| [ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1484 |\\n| [greshake/llm-security](https://github.com/greshake/llm-security) | 1483 |\\n| [promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 1480 |\\n| [milvus-io/bootcamp](https://github.com/milvus-io/bootcamp) | 1477 |\\n| [richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1475 |\\n| [melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 1428 |\\n| [YORG-AI/Open-Assistant](https://github.com/YORG-AI/Open-Assistant) | 1419 |\\n| [101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1416 |\\n| [jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1408 |\\n| [mmz-001/knowledge\\\\_gpt](https://github.com/mmz-001/knowledge_gpt) | 1398 |\\n| [intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 1387 |\\n| [Azure/azureml-examples](https://github.com/Azure/azureml-examples) | 1385 |\\n| [lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1367 |\\n| [eyurtsev/kor](https://github.com/eyurtsev/kor) | 1355 |\\n| [xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 1325 |\\n| [griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 1323 |\\n| [SuperDuperDB/superduperdb](https://github.com/SuperDuperDB/superduperdb) | 1290 |\\n| [cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 1284 |\\n| [psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 1260 |\\n| [filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 1250 |\\n| [nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 1237 |\\n| [pluralsh/plural](https://github.com/pluralsh/plural) | 1234 |\\n| [cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 1194 |\\n| [LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 1184 |\\n| [poe-platform/server-bot-quick-start](https://github.com/poe-platform/server-bot-quick-start) | 1182 |\\n| [microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1180 |\\n| [juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1171 |\\n| [visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1156 |\\n| [alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 1153 |\\n| [ThousandBirdsInc/chidori](https://github.com/ThousandBirdsInc/chidori) | 1152 |\\n| [irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1137 |\\n| [SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 1083 |\\n| 
[ray-project/llm-applications](https://github.com/ray-project/llm-applications) | 1080 |\\n| [run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 1072 |\\n| [jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 1041 |\\n| [MetaGLM/FinGLM](https://github.com/MetaGLM/FinGLM) | 1035 |\\n| [peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 1020 |\\n| [Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 991 |\\n| [langchain-ai/langserve](https://github.com/langchain-ai/langserve) | 983 |\\n| [THUDM/AgentTuning](https://github.com/THUDM/AgentTuning) | 976 |\\n| [rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 975 |\\n| [codeacme17/examor](https://github.com/codeacme17/examor) | 964 |\\n| [all-in-aigc/gpts-works](https://github.com/all-in-aigc/gpts-works) | 946 |\\n| [Ikaros-521/AI-Vtuber](https://github.com/Ikaros-521/AI-Vtuber) | 946 |\\n| [microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 898 |\\n| [cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 895 |\\n| [ricklamers/shell-ai](https://github.com/ricklamers/shell-ai) | 893 |\\n| [modelscope/modelscope-agent](https://github.com/modelscope/modelscope-agent) | 893 |\\n| [seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 886 |\\n| [ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 880 |\\n| [kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 872 |\\n| [corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 846 |\\n| [hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 841 |\\n| [kreneskyp/ix](https://github.com/kreneskyp/ix) | 821 |\\n| [Link-AGI/AutoAgents](https://github.com/Link-AGI/AutoAgents) | 820 |\\n| [truera/trulens](https://github.com/truera/trulens) | 794 |\\n| [Dataherald/dataherald](https://github.com/Dataherald/dataherald) | 788 |\\n| [sunlabuiuc/PyHealth](https://github.com/sunlabuiuc/PyHealth) | 783 |\\n| [jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 783 |\\n| [pyspark-ai/pyspark-ai](https://github.com/pyspark-ai/pyspark-ai) | 782 |\\n| [confident-ai/deepeval](https://github.com/confident-ai/deepeval) | 780 |\\n| [billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 777 |\\n| [langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 776 |\\n| [akshata29/entaoai](https://github.com/akshata29/entaoai) | 771 |\\n| [LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 770 |\\n| [getmetal/motorhead](https://github.com/getmetal/motorhead) | 768 |\\n| [Dicklesworthstone/swiss\\\\_army\\\\_llama](https://github.com/Dicklesworthstone/swiss_army_llama) | 757 |\\n| [ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 757 |\\n| [msoedov/langcorn](https://github.com/msoedov/langcorn) | 754 |\\n| [e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 753 |\\n| [microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 749 |\\n| [explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 731 |\\n| [MiuLab/Taiwan-LLM](https://github.com/MiuLab/Taiwan-LLM) | 716 |\\n| [whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 702 |\\n| [Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 692 |\\n| [iusztinpaul/hands-on-llms](https://github.com/iusztinpaul/hands-on-llms) | 687 |\\n| 
[safevideo/autollm](https://github.com/safevideo/autollm) | 682 |\\n| [OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 669 |\\n| [NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 663 |\\n| [AILab-CVC/GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | 662 |\\n| [langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 657 |\\n| [yvann-ba/Robby-chatbot](https://github.com/yvann-ba/Robby-chatbot) | 639 |\\n| [alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 635 |\\n| [amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 630 |\\n| [microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 621 |\\n| [aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 616 |\\n| [NeumTry/NeumAI](https://github.com/NeumTry/NeumAI) | 605 |\\n| [namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 599 |\\n| [plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 595 |\\n| [marimo-team/marimo](https://github.com/marimo-team/marimo) | 591 |\\n| [yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 586 |\\n| [xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 584 |\\n| [jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 573 |\\n| [dgarnitz/vectorflow](https://github.com/dgarnitz/vectorflow) | 568 |\\n| [yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 564 |\\n| [daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 563 |\\n| [traceloop/openllmetry](https://github.com/traceloop/openllmetry) | 559 |\\n| [Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 546 |\\n| [michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 545 |\\n| [jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 544 |\\n| [mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 533 |\\n| [marella/chatdocs](https://github.com/marella/chatdocs) | 532 |\\n| [opentensor/bittensor](https://github.com/opentensor/bittensor) | 532 |\\n| [DjangoPeng/openai-quickstart](https://github.com/DjangoPeng/openai-quickstart) | 527 |\\n| [freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 517 |\\n| [sidhq/Multi-GPT](https://github.com/sidhq/Multi-GPT) | 515 |\\n| [alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |\\n| [sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |\\n| [continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |\\n| [steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |\\n| [mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |\\n| [langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |\\n| [logan-markewich/llama\\\\_index\\\\_starter\\\\_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 483 |\\n| [datawhalechina/llm-universe](https://github.com/datawhalechina/llm-universe) | 475 |\\n| [leondz/garak](https://github.com/leondz/garak) | 464 |\\n| [RedisVentures/ArXivChatGuru](https://github.com/RedisVentures/ArXivChatGuru) | 461 |\\n| [Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 455 |\\n| [Aiyu-awa/luna-ai](https://github.com/Aiyu-awa/luna-ai) | 450 |\\n| 
[DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 450 |\\n| [Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 449 |\\n| [poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 447 |\\n| [onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 446 |\\n| [junruxiong/IncarnaMind](https://github.com/junruxiong/IncarnaMind) | 441 |\\n| [CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 441 |\\n| [daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 437 |\\n| [showlab/VLog](https://github.com/showlab/VLog) | 436 |\\n| [wandb/weave](https://github.com/wandb/weave) | 420 |\\n| [QwenLM/Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) | 419 |\\n| [huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 416 |\\n| [jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 411 |\\n| [monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 408 |\\n| [mallorbc/Finetune\\\\_LLMs](https://github.com/mallorbc/Finetune_LLMs) | 406 |\\n| [JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 405 |\\n| [rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 401 |\\n| [langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 398 |\\n| [mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 398 |\\n| [morpheuslord/GPT\\\\_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 391 |\\n| [MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 387 |\\n| [JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 384 |\\n| [mrwadams/attackgen](https://github.com/mrwadams/attackgen) | 381 |\\n| [codefuse-ai/Test-Agent](https://github.com/codefuse-ai/Test-Agent) | 380 |\\n| [personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 379 |\\n| [mosaicml/examples](https://github.com/mosaicml/examples) | 378 |\\n| [steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 370 |\\n| [FlagAI-Open/Aquila2](https://github.com/FlagAI-Open/Aquila2) | 365 |\\n| [Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 365 |\\n| [NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 357 |\\n| [BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 354 |\\n| [lilacai/lilac](https://github.com/lilacai/lilac) | 352 |\\n| [preset-io/promptimize](https://github.com/preset-io/promptimize) | 351 |\\n| [yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 347 |\\n| [andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 346 |\\n| [zhoudaquan/ChatAnything](https://github.com/zhoudaquan/ChatAnything) | 343 |\\n| [rgomezcasas/dotfiles](https://github.com/rgomezcasas/dotfiles) | 343 |\\n| [tigerlab-ai/tiger](https://github.com/tigerlab-ai/tiger) | 342 |\\n| [HumanSignal/label-studio-ml-backend](https://github.com/HumanSignal/label-studio-ml-backend) | 334 |\\n| [nasa-petal/bidara](https://github.com/nasa-petal/bidara) | 334 |\\n| [momegas/megabots](https://github.com/momegas/megabots) | 334 |\\n| [Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 330 |\\n| [CambioML/pykoi](https://github.com/CambioML/pykoi) | 326 |\\n| [Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 326 |\\n| [wandb/edu](https://github.com/wandb/edu) | 326 |\\n| 
[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 324 |\\n| [sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 322 |\\n| [liangwq/Chatglm\\\\_lora\\\\_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 321 |\\n| [ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 320 |\\n| [itamargol/openai](https://github.com/itamargol/openai) | 318 |\\n| [gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 304 |\\n| [SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 302 |\\n| [facebookresearch/personal-timeline](https://github.com/facebookresearch/personal-timeline) | 302 |\\n| [hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 301 |\\n| [Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 300 |\\n| [airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 300 |\\n| [GPT-Fathom/GPT-Fathom](https://github.com/GPT-Fathom/GPT-Fathom) | 299 |\\n| [kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 299 |\\n| [kyegomez/swarms](https://github.com/kyegomez/swarms) | 296 |\\n| [LangStream/langstream](https://github.com/LangStream/langstream) | 295 |\\n| [genia-dev/GeniA](https://github.com/genia-dev/GeniA) | 294 |\\n| [shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 291 |\\n| [TsinghuaDatabaseGroup/DB-GPT](https://github.com/TsinghuaDatabaseGroup/DB-GPT) | 290 |\\n| [conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 283 |\\n| [sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 283 |\\n| [AutoPackAI/beebot](https://github.com/AutoPackAI/beebot) | 282 |\\n| [pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 282 |\\n| [gkamradt/LLMTest\\\\_NeedleInAHaystack](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) | 280 |\\n| [gustavz/DataChad](https://github.com/gustavz/DataChad) | 280 |\\n| [Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 278 |\\n| [hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 275 |\\n| [AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 268 |\\n| [ennucore/clippinator](https://github.com/ennucore/clippinator) | 267 |\\n| [artitw/text2text](https://github.com/artitw/text2text) | 264 |\\n| [anarchy-ai/LLM-VM](https://github.com/anarchy-ai/LLM-VM) | 263 |\\n| [wpydcr/LLM-Kit](https://github.com/wpydcr/LLM-Kit) | 262 |\\n| [streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 262 |\\n| [paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 262 |\\n| [yym68686/ChatGPT-Telegram-Bot](https://github.com/yym68686/ChatGPT-Telegram-Bot) | 261 |\\n| [PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 259 |\\n| [radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 259 |\\n| [ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 259 |\\n| [ml6team/fondant](https://github.com/ml6team/fondant) | 254 |\\n| [bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 254 |\\n| [rahulnyk/knowledge\\\\_graph](https://github.com/rahulnyk/knowledge_graph) | 253 |\\n| [recalign/RecAlign](https://github.com/recalign/RecAlign) | 248 |\\n| [hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 248 |\\n| 
[fetchai/uAgents](https://github.com/fetchai/uAgents) | 247 |\\n| [arthur-ai/bench](https://github.com/arthur-ai/bench) | 247 |\\n| [miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 246 |\\n| [RoboCoachTechnologies/GPT-Synthesizer](https://github.com/RoboCoachTechnologies/GPT-Synthesizer) | 244 |\\n| [langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 242 |\\n| [kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 242 |\\n| [PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 241 |\\n| [stepanogil/autonomous-hr-chatbot](https://github.com/stepanogil/autonomous-hr-chatbot) | 238 |\\n| [WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 236 |\\n| [nexus-stc/stc](https://github.com/nexus-stc/stc) | 235 |\\n| [yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 235 |\\n| [Gentopia-AI/Gentopia](https://github.com/Gentopia-AI/Gentopia) | 235 |\\n| [alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 235 |\\n| [grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 232 |\\n| [shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 232 |\\n| [darrenburns/elia](https://github.com/darrenburns/elia) | 231 |\\n| [orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 231 |\\n| [handrew/browserpilot](https://github.com/handrew/browserpilot) | 226 |\\n| [su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 225 |\\n| [nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 225 |\\n| [dbpunk-labs/octogen](https://github.com/dbpunk-labs/octogen) | 224 |\\n| [langchain-ai/weblangchain](https://github.com/langchain-ai/weblangchain) | 222 |\\n| [CL-lau/SQL-GPT](https://github.com/CL-lau/SQL-GPT) | 222 |\\n| [alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |\\n| [showlab/UniVTG](https://github.com/showlab/UniVTG) | 220 |\\n| [edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 219 |\\n| [hardbyte/qabot](https://github.com/hardbyte/qabot) | 216 |\\n| [microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 215 |\\n| [Azure-Samples/chat-with-your-data-solution-accelerator](https://github.com/Azure-Samples/chat-with-your-data-solution-accelerator) | 214 |\\n| [amadad/agentcy](https://github.com/amadad/agentcy) | 213 |\\n| [snexus/llm-search](https://github.com/snexus/llm-search) | 212 |\\n| [afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 206 |\\n| [plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 205 |\\n| [yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 205 |\\n| [benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 205 |\\n| [voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 204 |\\n| [jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 204 |\\n| [emarco177/ice\\\\_breaker](https://github.com/emarco177/ice_breaker) | 204 |\\n| [tencentmusic/supersonic](https://github.com/tencentmusic/supersonic) | 202 |\\n| [Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 202 |\\n| [blob42/Instrukt](https://github.com/blob42/Instrukt) | 201 |\\n| [langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk) | 200 |\\n| [SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 200 |\\n| [ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 198 |\\n| 
[KMnO4-zx/huanhuan-chat](https://github.com/KMnO4-zx/huanhuan-chat) | 196 |\\n| [Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 192 |\\n| [hongbo-miao/hongbomiao.com](https://github.com/hongbo-miao/hongbomiao.com) | 190 |\\n| [CakeCrusher/openplugin](https://github.com/CakeCrusher/openplugin) | 190 |\\n| [PaddlePaddle/ERNIE-Bot-SDK](https://github.com/PaddlePaddle/ERNIE-Bot-SDK) | 189 |\\n| [retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 189 |\\n| [AmineDiro/cria](https://github.com/AmineDiro/cria) | 187 |\\n| [lancedb/vectordb-recipes](https://github.com/lancedb/vectordb-recipes) | 186 |\\n| [vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 185 |\\n| [aws-ia/ecs-blueprints](https://github.com/aws-ia/ecs-blueprints) | 184 |\\n| [ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 183 |\\n| [MuhammadMoinFaisal/LargeLanguageModelsProjects](https://github.com/MuhammadMoinFaisal/LargeLanguageModelsProjects) | 182 |\\n| [shauryr/S2QA](https://github.com/shauryr/S2QA) | 181 |\\n| [summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 180 |\\n| [NomaDamas/RAGchain](https://github.com/NomaDamas/RAGchain) | 179 |\\n| [pnkvalavala/repochat](https://github.com/pnkvalavala/repochat) | 179 |\\n| [ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 177 |\\n| [fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 177 |\\n| [langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 175 |\\n| [iMagist486/ElasticSearch-Langchain-Chatglm2](https://github.com/iMagist486/ElasticSearch-Langchain-Chatglm2) | 175 |\\n| [limaoyi1/Auto-PPT](https://github.com/limaoyi1/Auto-PPT) | 175 |\\n| [Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 175 |\\n| [morpheuslord/HackBot](https://github.com/morpheuslord/HackBot) | 174 |\\n| [v7labs/benchllm](https://github.com/v7labs/benchllm) | 174 |\\n| [Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 174 |\\n| [dongyh20/Octopus](https://github.com/dongyh20/Octopus) | 173 |\\n| [kimtth/azure-openai-llm-vector-langchain](https://github.com/kimtth/azure-openai-llm-vector-langchain) | 173 |\\n| [mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 173 |\\n| [zilliztech/akcio](https://github.com/zilliztech/akcio) | 172 |\\n| [jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 172 |\\n| [ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 172 |\\n| [joaomdmoura/CrewAI](https://github.com/joaomdmoura/CrewAI) | 170 |\\n| [katanaml/llm-mistral-invoice-cpu](https://github.com/katanaml/llm-mistral-invoice-cpu) | 170 |\\n| [chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 170 |\\n| [mudler/LocalAGI](https://github.com/mudler/LocalAGI) | 167 |\\n| [dssjon/biblos](https://github.com/dssjon/biblos) | 165 |\\n| [kjappelbaum/gptchem](https://github.com/kjappelbaum/gptchem) | 165 |\\n| [xxw1995/chatglm3-finetune](https://github.com/xxw1995/chatglm3-finetune) | 164 |\\n| [ArjanCodes/examples](https://github.com/ArjanCodes/examples) | 163 |\\n| [AIAnytime/Llama2-Medical-Chatbot](https://github.com/AIAnytime/Llama2-Medical-Chatbot) | 163 |\\n| [RCGAI/SimplyRetrieve](https://github.com/RCGAI/SimplyRetrieve) | 162 |\\n| [langchain-ai/langchain-teacher](https://github.com/langchain-ai/langchain-teacher) | 162 |\\n| 
[menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 162 |\\n| [flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 162 |\\n| [homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 161 |\\n| [jiran214/langup-ai](https://github.com/jiran214/langup-ai) | 160 |\\n| [JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 160 |\\n| [GoogleCloudPlatform/data-analytics-golden-demo](https://github.com/GoogleCloudPlatform/data-analytics-golden-demo) | 159 |\\n| [positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 159 |\\n| [luisroque/large\\\\_laguage\\\\_models](https://github.com/luisroque/large_laguage_models) | 159 |\\n| [mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 158 |\\n| [wandb/wandbot](https://github.com/wandb/wandbot) | 158 |\\n| [elastic/elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) | 157 |\\n| [shroominic/funcchain](https://github.com/shroominic/funcchain) | 157 |\\n| [deeppavlov/dream](https://github.com/deeppavlov/dream) | 156 |\\n| [mluogh/eastworld](https://github.com/mluogh/eastworld) | 154 |\\n| [georgesung/llm\\\\_qlora](https://github.com/georgesung/llm_qlora) | 154 |\\n| [RUC-GSAI/YuLan-Rec](https://github.com/RUC-GSAI/YuLan-Rec) | 153 |\\n| [KylinC/ChatFinance](https://github.com/KylinC/ChatFinance) | 152 |\\n| [Dicklesworthstone/llama2\\\\_aided\\\\_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 152 |\\n| [c0sogi/LLMChat](https://github.com/c0sogi/LLMChat) | 152 |\\n| [eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 152 |\\n| [ErikBjare/gptme](https://github.com/ErikBjare/gptme) | 152 |\\n| [Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 152 |\\n| [RoboCoachTechnologies/ROScribe](https://github.com/RoboCoachTechnologies/ROScribe) | 151 |\\n| [Aggregate-Intellect/sherpa](https://github.com/Aggregate-Intellect/sherpa) | 151 |\\n| [3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 151 |\\n| [tangqiaoyu/ToolAlpaca](https://github.com/tangqiaoyu/ToolAlpaca) | 150 |\\n| [kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 150 |\\n| [mallahyari/drqa](https://github.com/mallahyari/drqa) | 150 |\\n| [MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 149 |\\n| [Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |\\n| [realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |\\n| [ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |\\n| [solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |\\n| [aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |\\n| [Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |\\n| [menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 146 |\\n| [trancethehuman/entities-extraction-web-scraper](https://github.com/trancethehuman/entities-extraction-web-scraper) | 144 |\\n| [peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 144 |\\n| [grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 144 |\\n| [gh18l/CrawlGPT](https://github.com/gh18l/CrawlGPT) | 142 |\\n| [langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 142 |\\n| [yasyf/summ](https://github.com/yasyf/summ) | 141 |\\n| 
[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 141 |\\n| [hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |\\n| [jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 139 |\\n| [zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 139 |\\n| [jlonge4/local\\\\_llama](https://github.com/jlonge4/local_llama) | 139 |\\n| [smyja/blackmaria](https://github.com/smyja/blackmaria) | 138 |\\n| [ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 137 |\\n| [log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 137 |\\n| [davila7/file-gpt](https://github.com/davila7/file-gpt) | 137 |\\n| [dcaribou/transfermarkt-datasets](https://github.com/dcaribou/transfermarkt-datasets) | 136 |\\n| [ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 135 |\\n| [Undertone0809/promptulate](https://github.com/Undertone0809/promptulate) | 134 |\\n| [fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 134 |\\n| [run-llama/ai-engineer-workshop](https://github.com/run-llama/ai-engineer-workshop) | 133 |\\n| [definitive-io/code-indexer-loop](https://github.com/definitive-io/code-indexer-loop) | 131 |\\n| [mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 131 |\\n| [baidubce/bce-qianfan-sdk](https://github.com/baidubce/bce-qianfan-sdk) | 130 |\\n| [Ngonie-x/langchain\\\\_csv](https://github.com/Ngonie-x/langchain_csv) | 130 |\\n| [IvanIsCoding/ResuLLMe](https://github.com/IvanIsCoding/ResuLLMe) | 130 |\\n| [AnchoringAI/anchoring-ai](https://github.com/AnchoringAI/anchoring-ai) | 129 |\\n| [Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 128 |\\n| [athina-ai/athina-sdk](https://github.com/athina-ai/athina-sdk) | 126 |\\n| [thunlp/ChatEval](https://github.com/thunlp/ChatEval) | 126 |\\n| [prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 126 |\\n| [vietanhdev/pautobot](https://github.com/vietanhdev/pautobot) | 125 |\\n| [awslabs/generative-ai-cdk-constructs](https://github.com/awslabs/generative-ai-cdk-constructs) | 124 |\\n| [sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 124 |\\n| [rabbitmetrics/langchain-13-min](https://github.com/rabbitmetrics/langchain-13-min) | 124 |\\n| [AutoLLM/AutoAgents](https://github.com/AutoLLM/AutoAgents) | 122 |\\n| [nicknochnack/Nopenai](https://github.com/nicknochnack/Nopenai) | 122 |\\n| [wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 122 |\\n| [dotvignesh/PDFChat](https://github.com/dotvignesh/PDFChat) | 122 |\\n| [topoteretes/PromethAI-Backend](https://github.com/topoteretes/PromethAI-Backend) | 121 |\\n| [nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 121 |\\n| [vishwasg217/finsight](https://github.com/vishwasg217/finsight) | 120 |\\n| [snap-stanford/MLAgentBench](https://github.com/snap-stanford/MLAgentBench) | 120 |\\n| [Azure/app-service-linux-docs](https://github.com/Azure/app-service-linux-docs) | 120 |\\n| [nyanp/chat2plot](https://github.com/nyanp/chat2plot) | 120 |\\n| [ant4g0nist/polar](https://github.com/ant4g0nist/polar) | 119 |\\n| [aws-samples/cdk-eks-blueprints-patterns](https://github.com/aws-samples/cdk-eks-blueprints-patterns) | 119 |\\n| [aws-samples/amazon-kendra-langchain-extensions](https://github.com/aws-samples/amazon-kendra-langchain-extensions) | 119 |\\n| [Xueheng-Li/SynologyChatbotGPT](https://github.com/Xueheng-Li/SynologyChatbotGPT) 
| 119 |\\n| [CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 117 |\\n| [Lin-jun-xiang/docGPT-langchain](https://github.com/Lin-jun-xiang/docGPT-langchain) | 117 |\\n| [ademakdogan/ChatSQL](https://github.com/ademakdogan/ChatSQL) | 116 |\\n| [aniketmaurya/llm-inference](https://github.com/aniketmaurya/llm-inference) | 115 |\\n| [xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 115 |\\n| [cmooredev/RepoReader](https://github.com/cmooredev/RepoReader) | 115 |\\n| [abi/autocommit](https://github.com/abi/autocommit) | 115 |\\n| [MIDORIBIN/langchain-gpt4free](https://github.com/MIDORIBIN/langchain-gpt4free) | 114 |\\n| [finaldie/auto-news](https://github.com/finaldie/auto-news) | 114 |\\n| [Anil-matcha/Youtube-to-chatbot](https://github.com/Anil-matcha/Youtube-to-chatbot) | 114 |\\n| [avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot) | 114 |\\n| [Capsize-Games/airunner](https://github.com/Capsize-Games/airunner) | 113 |\\n| [atisharma/llama\\\\_farm](https://github.com/atisharma/llama_farm) | 113 |\\n| [mbchang/data-driven-characters](https://github.com/mbchang/data-driven-characters) | 112 |\\n| [fiddler-labs/fiddler-auditor](https://github.com/fiddler-labs/fiddler-auditor) | 112 |\\n| [dirkjbreeuwer/gpt-automated-web-scraper](https://github.com/dirkjbreeuwer/gpt-automated-web-scraper) | 111 |\\n| [Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding](https://github.com/Appointat/Chat-with-Document-s-using-ChatGPT-API-and-Text-Embedding) | 111 |\\n| [hwchase17/langchain-gradio-template](https://github.com/hwchase17/langchain-gradio-template) | 111 |\\n| [artas728/spelltest](https://github.com/artas728/spelltest) | 110 |\\n| [NVIDIA/GenerativeAIExamples](https://github.com/NVIDIA/GenerativeAIExamples) | 109 |\\n| [Azure/aistudio-copilot-sample](https://github.com/Azure/aistudio-copilot-sample) | 108 |\\n| [codefuse-ai/codefuse-chatbot](https://github.com/codefuse-ai/codefuse-chatbot) | 108 |\\n| [apirrone/Memento](https://github.com/apirrone/Memento) | 108 |\\n| [e-johnstonn/GPT-Doc-Summarizer](https://github.com/e-johnstonn/GPT-Doc-Summarizer) | 108 |\\n| [salesforce/BOLAA](https://github.com/salesforce/BOLAA) | 107 |\\n| [Erol444/gpt4-openai-api](https://github.com/Erol444/gpt4-openai-api) | 106 |\\n| [linjungz/chat-with-your-doc](https://github.com/linjungz/chat-with-your-doc) | 106 |\\n| [crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 106 |\\n| [panaverse/learn-generative-ai](https://github.com/panaverse/learn-generative-ai) | 105 |\\n| [Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |\\n| [malywut/gpt\\\\_examples](https://github.com/malywut/gpt_examples) | 105 |\\n| [ritun16/chain-of-verification](https://github.com/ritun16/chain-of-verification) | 104 |\\n| [langchain-ai/langchain-benchmarks](https://github.com/langchain-ai/langchain-benchmarks) | 104 |\\n| [lightninglabs/LangChainBitcoin](https://github.com/lightninglabs/LangChainBitcoin) | 104 |\\n| [flepied/second-brain-agent](https://github.com/flepied/second-brain-agent) | 103 |\\n| [llmapp/openai.mini](https://github.com/llmapp/openai.mini) | 102 |\\n| [gimlet-ai/tddGPT](https://github.com/gimlet-ai/tddGPT) | 102 |\\n| [jlonge4/gpt\\\\_chatwithPDF](https://github.com/jlonge4/gpt_chatwithPDF) | 102 |\\n| [agentification/RAFA\\\\_code](https://github.com/agentification/RAFA_code) | 101 |\\n| [pacman100/DHS-LLM-Workshop](https://github.com/pacman100/DHS-LLM-Workshop) | 101 |\\n| 
[aws-samples/private-llm-qa-bot](https://github.com/aws-samples/private-llm-qa-bot) | 101 |\\n\\n_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)\\n_\\n\\n`github-dependents-info --repo \"langchain-ai/langchain\" --markdownfile dependents.md --minstars 100 --sort stars`', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/dependents/', 'pageStatusCode': 200}}, {'markdown': '3rd Party Tutorials\\n===================\\n\\nTutorials[\\u200b](#tutorials \"Direct link to Tutorials\")\\n\\n----------------------------------------------------\\n\\n### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)\\n[\\u200b](#langchain-v-01-by-langchainai \"Direct link to langchain-v-01-by-langchainai\")\\n\\n### [Build with Langchain - Advanced by 
LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)\\n[\\u200b](#build-with-langchain---advanced-by-langchainai \"Direct link to build-with-langchain---advanced-by-langchainai\")\\n\\n### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)\\n[\\u200b](#langgraph-by-langchainai \"Direct link to langgraph-by-langchainai\")\\n\\n### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)\\n[\\u200b](#by-greg-kamradt \"Direct link to by-greg-kamradt\")\\n\\n### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)\\n[\\u200b](#by-sam-witteveen \"Direct link to by-sam-witteveen\")\\n\\n### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)\\n[\\u200b](#by-james-briggs \"Direct link to by-james-briggs\")\\n\\n### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)\\n[\\u200b](#by-prompt-engineering \"Direct link to by-prompt-engineering\")\\n\\n### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)\\n[\\u200b](#by-mayo-oshin \"Direct link to by-mayo-oshin\")\\n\\n### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)\\n[\\u200b](#by-1-little-coder \"Direct link to by-1-little-coder\")\\n\\n### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)\\n[\\u200b](#by-boblin-chinese-language \"Direct link to by-boblin-chinese-language\")\\n\\nCourses[\\u200b](#courses \"Direct link to Courses\")\\n\\n----------------------------------------------\\n\\n### Featured courses on Deeplearning.AI[\\u200b](#featured-courses-on-deeplearningai \"Direct link to Featured courses on Deeplearning.AI\")\\n\\n* [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)\\n \\n* [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)\\n \\n* [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)\\n \\n* [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)\\n \\n\\n### Online courses[\\u200b](#online-courses \"Direct link to Online courses\")\\n\\n* [Udemy](https://www.udemy.com/courses/search/?q=langchain)\\n \\n* [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)\\n \\n* [Pluralsight](https://www.pluralsight.com/search?q=langchain)\\n \\n* [Coursera](https://www.coursera.org/search?query=langchain)\\n \\n* [Maven](https://maven.com/courses?query=langchain)\\n \\n* [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)\\n \\n* [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)\\n \\n* [edX](https://www.edx.org/search?q=langchain)\\n \\n* [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)\\n \\n\\nShort Tutorials[\\u200b](#short-tutorials \"Direct link to Short Tutorials\")\\n\\n----------------------------------------------------------------------\\n\\n* [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)\\n \\n* [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)\\n \\n* [by 
Rabbitmetrics](https://youtu.be/aywZrzNaKjs)\\n \\n* [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)\\n \\n\\nBooks and Handbooks[\\u200b](#books-and-handbooks \"Direct link to Books and Handbooks\")\\n\\n----------------------------------------------------------------------------------\\n\\n* [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1)\\n by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true)\\n , ©️ 2023 Packt Publishing\\n* [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/)\\n By **James Briggs** and **Francisco Ingham**\\n* [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde)\\n by **Ivan Reznikov**\\n* [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)\\n \\n\\n* * *\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/tutorials.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Tutorials](#tutorials)\\n * [LangChain v 0.1 by LangChain.ai](#langchain-v-01-by-langchainai)\\n \\n * [Build with Langchain - Advanced by LangChain.ai](#build-with-langchain---advanced-by-langchainai)\\n \\n * [LangGraph by LangChain.ai](#langgraph-by-langchainai)\\n \\n * [by Greg Kamradt](#by-greg-kamradt)\\n \\n * [by Sam Witteveen](#by-sam-witteveen)\\n \\n * [by James Briggs](#by-james-briggs)\\n \\n * [by Prompt Engineering](#by-prompt-engineering)\\n \\n * [by Mayo Oshin](#by-mayo-oshin)\\n \\n * [by 1 little Coder](#by-1-little-coder)\\n \\n * [by BobLin (Chinese language)](#by-boblin-chinese-language)\\n \\n* [Courses](#courses)\\n * [Featured courses on Deeplearning.AI](#featured-courses-on-deeplearningai)\\n \\n * [Online courses](#online-courses)\\n \\n* [Short Tutorials](#short-tutorials)\\n \\n* [Books and Handbooks](#books-and-handbooks)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* [Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/tutorials/', 'pageStatusCode': 200}}, {'markdown': '[Skip to main content](#__docusaurus_skipToContent_fallback)\\n\\nLangChain 0.2 is out! Leave feedback on the v0.2 docs [here](https://github.com/langchain-ai/langchain/discussions/21716)\\n. 
You can view the v0.1 docs [here](/v0.1/docs/get_started/introduction/)\\n.\\n\\n[![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark.png)![🦜️🔗 LangChain](https://python.langchain.com/v0.2/img/brand/wordmark-dark.png)](/v0.2/)\\n[Integrations](/v0.2/docs/integrations/platforms/)\\n[API Reference](https://api.python.langchain.com)\\n\\n[More](#)\\n\\n* [People](/v0.2/docs/people/)\\n \\n* [Contributing](/v0.2/docs/contributing/)\\n \\n* [Templates](/v0.2/docs/templates/)\\n \\n* [Cookbooks](https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md)\\n \\n* [3rd party tutorials](/v0.2/docs/additional_resources/tutorials/)\\n \\n* [YouTube](/v0.2/docs/additional_resources/youtube/)\\n \\n* [arXiv](/v0.2/docs/additional_resources/arxiv_references/)\\n \\n\\n[v0.2](#)\\n\\n* [v0.2](/v0.2/docs/introduction/)\\n \\n* [v0.1](https://python.langchain.com/v0.1/docs/get_started/introduction)\\n \\n\\n[🦜️🔗](#)\\n\\n* [LangSmith](https://smith.langchain.com)\\n \\n* [LangSmith Docs](https://docs.smith.langchain.com/)\\n \\n* [LangServe GitHub](https://github.com/langchain-ai/langserve)\\n \\n* [Templates GitHub](https://github.com/langchain-ai/langchain/tree/master/templates)\\n \\n* [Templates Hub](https://templates.langchain.com)\\n \\n* [LangChain Hub](https://smith.langchain.com/hub)\\n \\n* [JS/TS Docs](https://js.langchain.com)\\n \\n\\n[💬](https://chat.langchain.com)\\n[](https://github.com/langchain-ai/langchain)\\n\\nSearch\\n\\nOn this page\\n\\nYouTube videos\\n==============\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)\\n[\\u200b](#official-langchain-youtube-channel \"Direct link to official-langchain-youtube-channel\")\\n\\n### [Tutorials on YouTube](/v0.2/docs/additional_resources/tutorials/#tutorials)\\n[\\u200b](#tutorials-on-youtube \"Direct link to tutorials-on-youtube\")\\n\\nVideos (sorted by views)[\\u200b](#videos-sorted-by-views \"Direct link to Videos (sorted by views)\")\\n\\n-----------------------------------------------------------------------------------------------\\n\\nOnly videos with 40K+ views:\\n\\n* [Using `ChatGPT` with YOUR OWN Data. This is magical. 
(LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)\\n \\n* [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)\\n \\n* [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)\\n \\n* [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)\\n \\n* [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)\\n \\n* [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)\\n \\n* [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)\\n \\n* [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)\\n \\n* [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)\\n \\n* [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)\\n \\n* [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)\\n \\n* [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)\\n \\n* [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)\\n \\n* [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)\\n \\n* [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)\\n \\n* [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)\\n \\n* [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)\\n \\n* [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)\\n \\n* [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)\\n \\n* [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)\\n \\n* [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)\\n \\n* [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)\\n \\n* [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)\\n \\n* [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)\\n \\n* [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)\\n \\n* [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)\\n \\n* [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)\\n \\n* [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)\\n 
\\n* [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)\\n \\n* [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)\\n \\n* [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)\\n \\n* [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)\\n \\n* [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)\\n \\n* [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)\\n \\n* [Prompt Engineering And LLM\\'s With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)\\n \\n* [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)\\n \\n* [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)\\n \\n* [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)\\n \\n* [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)\\n \\n* [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)\\n \\n* [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)\\n \\n* [What\\'s next for AI agents ft. LangChain\\'s Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)\\n \\n* [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)\\n \\n* [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)\\n \\n* [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)\\n \\n* [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)\\n \\n* [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)\\n \\n* [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)\\n \\n\\n* * *\\n\\n\\\\[Updated 2024-05-16\\\\]\\n\\n[Edit this page](https://github.com/langchain-ai/langchain/edit/master/docs/docs/additional_resources/youtube.mdx)\\n\\n* * *\\n\\n#### Was this page helpful?\\n\\n \\n\\n#### You can also leave detailed feedback [on GitHub](https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E)\\n.\\n\\n* [Official LangChain YouTube channel](#official-langchain-youtube-channel)\\n \\n* [Tutorials on YouTube](#tutorials-on-youtube)\\n \\n* [Videos (sorted by views)](#videos-sorted-by-views)\\n \\n\\nCommunity\\n\\n* [Discord](https://discord.gg/cU2adEyC7w)\\n \\n* [Twitter](https://twitter.com/LangChainAI)\\n \\n\\nGitHub\\n\\n* [Organization](https://github.com/langchain-ai)\\n \\n* [Python](https://github.com/langchain-ai/langchain)\\n \\n* [JS/TS](https://github.com/langchain-ai/langchainjs)\\n \\n\\nMore\\n\\n* 
[Homepage](https://langchain.com)\\n \\n* [Blog](https://blog.langchain.dev)\\n \\n* [YouTube](https://www.youtube.com/@LangChain)\\n \\n\\nCopyright © 2024 LangChain, Inc.', 'metadata': {'ogLocaleAlternate': [], 'sourceURL': 'https://python.langchain.com/v0.2/docs/additional_resources/youtube/', 'pageStatusCode': 200}}]\n",
"==================================================\n",
"\n",
"Initial Response:\n",
"Stop Reason: tool_use\n",
"Content: [TextBlock(text='Based on the crawl results, here are the most common topics related to LangChain, along with their counts:\\n\\n1. Tutorials and Courses (30+)\\n2. Integration with other tools/platforms (20+)\\n - OpenAI/ChatGPT (10+)\\n - Hugging Face (5+)\\n - Google (Gemini, PaLM) (5+)\\n - Llama models (5+)\\n3. RAG (Retrieval Augmented Generation) (10+)\\n4. PDF processing/chat (10+)\\n5. Agents and autonomous systems (8+)\\n6. Vector databases/embeddings (7+)\\n - Pinecone (2)\\n - Chroma (2)\\n7. Prompt engineering (5+)\\n8. LLM applications/projects (5+)\\n9. Conversational AI/Chatbots (5+)\\n10. Code-related applications (4+)\\n\\nLet me visualize this data for you using a bar chart.', type='text'), ToolUseBlock(id='toolu_01LRPwF6JRobktpyjHt5Dv5a', input={'code': \"import matplotlib.pyplot as plt\\n\\ntopics = [\\n 'Tutorials/Courses', 'Integrations', 'RAG', 'PDF processing', \\n 'Agents', 'Vector DBs', 'Prompt engineering', 'LLM projects', \\n 'Chatbots', 'Code applications'\\n]\\ncounts = [30, 20, 10, 10, 8, 7, 5, 5, 5, 4]\\n\\nplt.figure(figsize=(12, 6))\\nplt.bar(topics, counts)\\nplt.title('Most Common LangChain Topics')\\nplt.xlabel('Topics')\\nplt.ylabel('Approximate Count')\\nplt.xticks(rotation=45, ha='right')\\nplt.tight_layout()\\nplt.show()\"}, name='execute_python', type='tool_use')]\n",
"\n",
"Tool Used: execute_python\n",
"Tool Input: {'code': \"import matplotlib.pyplot as plt\\n\\ntopics = [\\n 'Tutorials/Courses', 'Integrations', 'RAG', 'PDF processing', \\n 'Agents', 'Vector DBs', 'Prompt engineering', 'LLM projects', \\n 'Chatbots', 'Code applications'\\n]\\ncounts = [30, 20, 10, 10, 8, 7, 5, 5, 5, 4]\\n\\nplt.figure(figsize=(12, 6))\\nplt.bar(topics, counts)\\nplt.title('Most Common LangChain Topics')\\nplt.xlabel('Topics')\\nplt.ylabel('Approximate Count')\\nplt.xticks(rotation=45, ha='right')\\nplt.tight_layout()\\nplt.show()\"}\n",
"Running code interpreter...\n",
"Tool Result: [Result(<Figure size 1200x600 with 1 Axes>)]\n",
"[Result(<Figure size 1200x600 with 1 Axes>)]\n"
]
}
],
"source": [
"from e2b_code_interpreter import CodeInterpreter\n",
"\n",
"with CodeInterpreter(api_key=e2b_api_key) as code_interpreter:\n",
" code_interpreter_results = chat_with_claude(\n",
" code_interpreter,\n",
" \"Use Python to identify the most common topics in the crawl results. For each topic, count the number of times it appears in the crawl results and plot them. Here are the crawl results: \" + str(cleaned_crawl_result),\n",
" )\n",
"print(code_interpreter_results)"
]
},
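  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The cell above prints the `Result` objects returned by the code interpreter run. As a minimal follow-up sketch (assuming each returned `Result` exposes the rendered chart as a base64-encoded `png` attribute, as the E2B SDK typically does), the first chart could also be written to disk:\n",
    "\n",
    "```python\n",
    "import base64\n",
    "\n",
    "# Hypothetical follow-up: persist the first returned chart (if any) as a PNG file.\n",
    "if code_interpreter_results:\n",
    "    first_result = code_interpreter_results[0]\n",
    "    png_data = getattr(first_result, \"png\", None)  # base64-encoded image, if present\n",
    "    if png_data:\n",
    "        with open(\"chart.png\", \"wb\") as f:\n",
    "            f.write(base64.b64decode(png_data))\n",
    "```\n"
   ]
  },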
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<Figure size 1200x600 with 1 Axes>\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABKUAAAJOCAYAAABm7rQwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACxhUlEQVR4nOzdd3gUZfv28XMTIAktVOkdpEjvIL230JWm0kEfOiJSBASlSFdEsCBFuii9iUiTIgICglIFpHdIaIEk1/sHb/aXNeBDNNnNk3w/x5FD9p6ZzZUdZ3b23Hvu22FmJgAAAAAAAMCNvDxdAAAAAAAAAOIfQikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAARIt27dopadKk0fqc2bNnV7t27aL1Od3p3XfflcPh8HQZAADESoRSAADEErNmzZLD4ZDD4dCPP/4YabmZKUuWLHI4HGrQoEGM1HDhwgW9++672r9/f5S2O3nypLp27aqcOXPK19dXyZMn14svvqgPP/xQ9+/fj5Fa44Lwfb5nzx5Pl/K3Hjx4oEmTJqlMmTLy9/eXr6+vnn/+eXXv3l3Hjh3zdHnP5PTp087j67/9nD592tPlAgAQLyTwdAEAAMCVr6+v5s+frwoVKri0b9myRefOnZOPj0+M/e4LFy5o+PDhyp49u4oWLfpM26xevVovvfSSfHx89Nprr6lgwYJ6+PChfvzxR7311ls6fPiwPvvssxirGTHr2rVrqlOnjvbu3asGDRqodevWSpo0qY4ePaqFCxfqs88+08OHD2Ps9x89elReXv/+e9S0adPqq6++cmmbMGGCzp07p0mTJkVaN7q88847GjBgQLQ9HwAAcQmhFAAAsUy9evX09ddf66OPPlKCBP/3Vj1//nyVKFFC165d82B1rk6dOqWWLVsqW7Zs+uGHH5QhQwbnsm7duunEiRNavXq1ByvEv9WuXTv98ssvWrJkiZo1a+ay7L333tPgwYNj9PdHVwibJEkSvfLKKy5tCxcu1M2bNyO1R6cECRK4HMcAAOD/cPseAACxTKtWrXT9+nVt2LDB2fbw4UMtWbJErVu3fuI2d+/e1ZtvvqksWbLIx8dHefPm1fjx42VmLutt2LBBFSpUUIoUKZQ0aVLlzZtXgwYNkiRt3rxZpUqVkiS1b9/eeSvTrFmznlrr2LFjdefOHc2YMcMlkAqXO3du9erVy/k4JCRE7733nnLlyiUfHx9lz55dgwYNUnBwsMt22bNnV4MGDbR582aVLFlSfn5+KlSokDZv3ixJ+vbbb1WoUCH5+vqqRIkS+uWXX1y2Dx/b6M8//1SDBg2UNGlSZcqUSVOnTpUk/frrr6pWrZqSJEmibNmyaf78+ZFq/+OPP/TSSy8pVapUSpw4scqWLRspYNu8ebMcDocWL16skSNHKnPmzPL19VX16tV14sSJp75uUfHw4UMNHTpUJUqUkL+/v5IkSaKKFStq06ZNLuuF3542fvx4ffbZZ87XuFSpUvr5558jPe/XX3+tAgUKyNfXVwULFtTSpUvVrl07Zc+e3bnOTz/9pNWrV6tjx46RAinpcWA0fvz4SO3nz59X48aNlTRpUqVNm1b9+vVTaGioyzrjx49X+fLllTp1avn5+alEiRJasmRJpOf665hS4bc8bt++XX379lXatGmVJEkSNWnSRFevXv1vL+d/deXKFXXs2FHp0qWTr6+vihQpotmzZ7usE/G1njRpkrJlyyY/Pz9VrlxZhw4dcln3aWNKzZ07V6VLl1bixImVMmVKVapUSd99951z+Z49e1S7dm2lSZNGfn5+ypEjhzp06PCv/z4AAGITQikAAGKZ7Nmzq1y5clqwYIGzbe3atbp9+7ZatmwZaX0zU8OGDTVp0iTVqVNHEydOVN68efXWW2+pb9++zvUOHz6sBg0aKDg4WCNGjNCECRPUsGFDbd++XZKUP39+jRgxQpLUpUsXffXVV/rqq69UqVKlp9a6cuVK5cyZU+XLl3+mv61Tp04aOnSoihcvrkmTJqly5coaPXr0E/+uEydOqHXr1goICNDo0aN18+ZNBQQEaN68eerTp49eeeUVDR8+XCdPntTLL7+ssLAwl+1DQ0NVt25dZcmSRWPHjlX27NnVvXt3zZo1S3Xq1FHJkiX1wQcfKFmyZHrttdd06tQp57aXL19W+fLltX79ev3nP//RyJEj9eDBAzVs2FBLly6NVOuYMWO0dOlS9evXTwMHDtSuXbvUpk2bZ3pN/pvAwEB98cUXqlKlij744AO9++67unr1qmrXrv3Esb/mz5+vcePGqWvXrnr//fd1+vRpNW3aVI8ePXKus3r1arVo0UIJEybU6NGj1bRpU3Xs2FF79+51ea4VK1ZIkl599dVnrjc0NFS1a9dW6tSpNX78eFWuXFkTJkyIdAvnhx9+qGLFimnEiBEaNWqUEiRIoJdeeumZe9b16NFDBw4c0LBhw/TGG29o5cqV6t69+zPX+ST3799XlSpV9NVXX6lNmzYaN26c/P391a5dO3344YeR1p8zZ44++ugjdevWTQMHDtShQ4dUrVo1Xb58+W9/z/Dhw/Xqq68qYcKEGjFihIYPH64sWbLohx9+kPQ4GKtVq5ZOnz6tAQMGaMqUKWrTpo127dr1r/4+AABiHQMAALHCzJkzTZL9/PPP9vHHH1uyZMns3r17Zmb20ksvWdWqVc3MLFu2bFa/fn3ndsuWLTNJ9v7777s8X/Pmzc3hcNiJEyfMzGzSpEkmya5evfrUGn7++WeTZDNnzvyv9d6+fdskWaNGjZ7p79u/f79Jsk6dOrm09+vXzyTZDz/84GzLli2bSbIdO3Y429avX2+SzM/Pz86cOeNs//TTT02Sbdq0ydnWtm1bk2SjRo1ytt28edP8/PzM4XDYwoULne1HjhwxSTZs2DBnW+/evU2Sbdu2zdkWFBRkOXLksOzZs1toaKiZmW3atMkkWf78+S04ONi57ocffmiS7Ndff/3b1yTiPn+akJAQl+cO/1vSpUtnHTp0cLadOnXKJFnq1Kntxo0bzvbly5ebJFu5cqWzrVChQpY5c2YLCgpytm3evNkkWbZs2ZxtTZo0MUl28+bNv/07woW/7iNGjHBpL1asmJUoUcKlLfz/7XAPHz60ggULWrVq1Vzas2XLZm3btnU+Dn/NatSoYWFhYc72Pn36mLe3t926deuZajUzq1+/vsvfO3nyZJNkc+fOdamrXLlyljRpUgsMDDSz/3ut/fz87Ny5c851f/rpJ5Nkffr0cbYNGzbMIl5yHz9+3Ly8vKxJkybO/4/Chf89S5cu/a//XwAAEBfQUwoAgFjo5Zdf1v3797Vq1SoFBQVp1apVT711b82aNfL29lbPnj1d2t98802ZmdauXStJSpEihSRp+fLlkXoV/ROBgYGSpGTJkj3T+mvWrJEkl95b4XVKitRDpkCBAipXrpzzcZkyZSRJ1apVU
9asWSO1//HHH5F+Z6dOnZz/TpEihfLmzaskSZLo5ZdfdrbnzZtXKVKkcNl+zZo1Kl26tMtg80mTJlWXLl10+vRp/fbbby6/p3379kqUKJHzccWKFZ9aU1R5e3s7nzssLEw3btxQSEiISpYsqX379kVav0WLFkqZMuVTa7lw4YJ+/fVXvfbaa0qaNKlzvcqVK6tQoUIuzxXVfRzu9ddfd3lcsWLFSK+Fn5+f8983b97U7du3VbFixSf+TU/SpUsXl9viKlasqNDQUJ05cyZKtUa0Zs0apU+fXq1atXK2JUyYUD179tSdO3e0ZcsWl/UbN26sTJkyOR+XLl1aZcqUcf6//iTLli1TWFiYhg4dGmkA9/C/J/xYXbVqlUsPNwAA4hpCKQAAYqG0adOqRo0amj9/vr799luFhoaqefPmT1z3zJkzypgxY6TgIH/+/M7l0uOw4sUXX1SnTp2ULl06tWzZUosXL/7HAVXy5MklSUFBQc+0/pkzZ+Tl5aXcuXO7tKdPn14pUqSIFCZEDJ4kyd/fX5KUJUuWJ7bfvHnTpd3X1zfSLGr+/v7KnDlzpDF+/P39XbY/c+aM8ubNG+lv+Otr+rRaw0Ohv9b0T82ePVuFCxeWr6+vUqdOrbRp02r16tW6fft2pHX/Wy3htf91PzypLar7WHry654yZcpIr8WqVatUtmxZ+fr6KlWqVEqbNq2mTZv2xL/pSWLiNT9z5ozy5MkTKSx62n7PkydPpOd4/vnndfr06af+jpMnT8rLy0sFChR46jqVK1dWs2bNNHz4cKVJk0aNGjXSzJkzI429BgDA/zpCKQAAYqnWrVtr7dq1mj59uurWrevsPfFP+fn5aevWrfr+++/16quv6uDBg2rRooVq1qwZaRDqZ5E8eXJlzJgx0sDO/82TBn1+Em9v7yi1218Gdf+320dFTDxnuLlz56pdu3bKlSuXZsyYoXXr1mnDhg2qVq3aEwPF6KwlX758kh4PDP+snvb7I9q2bZsaNmwoX19fffLJJ1qzZo02bNig1q1bP3OdMfmae5rD4dCSJUu0c+dOde/eXefPn1eHDh1UokQJ3blzx9PlAQAQbQilAACIpZo0aSIvLy/t2rXrqbfuSVK2bNl04cKFSL1Zjhw54lwezsvLS9WrV9fEiRP122+/aeTIkfrhhx+cM7k9a2AUrkGDBjp58qR27tz5X9fNli2bwsLCdPz4cZf2y5cv69atWy51elq2bNl09OjRSO1Pek1j2pIlS5QzZ059++23evXVV1W7dm3VqFFDDx48+EfPF177k2YH/GtbQECApMfBWHT65ptv5Ovrq/Xr16tDhw6qW7euatSoEa2/45/Ili2bjh8/Hinse9p+/+v/y5J07NgxlxkM/ypXrlwKCwuLdAvok5QtW1YjR47Unj17NG/ePB0+fFgLFy58hr8EAID/DYRSAADEUkmTJtW0adP07rvvOsOBJ6lXr55CQ0P18ccfu7RPmjRJDodDdevWlSTduHEj0rZFixaVJOdtQUmSJJEk3bp165lq7N+/v5IkSaJOnTo9ccaxkydPOmctq1evniRp8uTJLutMnDhRklS/fv1n+p3uUK9ePe3evdslbLt7964+++wzZc+e/W9vvYpu4T2CIvYA+umnn54pCHySjBkzqmDBgpozZ45Lr5stW7ZE6hFVrlw51alTR1988YWWLVsW6bkePnyofv36RbkGb29vORwOlx56p0+ffuLvcKd69erp0qVLWrRokbMtJCREU6ZMUdKkSVW5cmWX9ZctW6bz5887H+/evVs//fST85h7ksaNG8vLy0sjRoyIFH6F7+ObN29G6vH112MVAIC4IIGnCwAAAE/Xtm3b/7pOQECAqlatqsGDB+v06dMqUqSIvvvuOy1fvly9e/dWrly5JEkjRozQ1q1bVb9+fWXLlk1XrlzRJ598osyZMzsH9M6VK5dSpEih6dOnK1myZEqSJInKlCmjHDlyPPF358qVS/Pnz1eLFi2UP39+vfbaaypYsKAePnyoHTt26Ouvv1a7du0kSUWKFFHbtm312Wef6datW6pcubJ2796t2bNnq3HjxqpatWr0vGjRYMCAAVqwYIHq1q2rnj17KlWqVJo9e7ZOnTqlb775JtKYQ//Wl19+qXXr1kVq79Wrlxo0aKBvv/1WTZo0Uf369XXq1ClNnz5dBQoU+Me3co0aNUqNGjXSiy++qPbt2+vmzZv6+OOPVbBgwUjPOWfOHNWqVUtNmzZVQECAqlevriRJkuj48eNauHChLl68qPHjx0fp99evX18TJ05UnTp11Lp1a125ckVTp05V7ty5dfDgwX/0N0WHLl266NNPP1W7du20d+9eZc+eXUuWLNH27ds1efLkSOO25c6dWxUqVNAbb7yh4OBgTZ48WalTp1b//v2f+jty586twYMH67333lPFihXVtGlT+fj46Oeff1bGjBk1evRozZ49W5988omaNGmiXLlyKSgoSJ9//rmSJ0/uDHcBAIgLCKUAAPgf5+XlpRUrVmjo0KFatGiRZs6cqezZs2vcuHHOme0kqWHDhjp9+rS+/PJLXbt2TWnSpFHlypU1fPhw52DhCRMm1OzZszVw4EC9/vrrCgkJ0cyZM58aSoU/78GDBzVu3DgtX75c06ZNk4+PjwoXLqwJEyaoc+fOznW/+OIL5cyZU7NmzdLSpUuVPn16DRw4UMOGDYu5F+gfSJcunXbs2KG3335bU6ZM0YMHD1S4cGGtXLkyRnp0TZs27Ynt7dq1U7t27XTp0iV9+umnWr9+vQoUKKC5c+fq66+/1ubNm//R7wsICNCCBQv07rvvasCAAcqTJ49mzZql2bNn6/Dhwy7rpk2bVjt27NAnn3yiRYsWafDgwXr48KGyZcumhg0bqlevXlH+/dWqVdOMGTM0ZswY9e7dWzly5NAHH3yg06dPezSU8vPz0+bNmzVgwADNnj1bgYGByps3r2bOnOkMVyN67bXX5OXlpcmTJ+vKlSsqXbq0Pv74Y2XIkOFvf8+IESOUI0cOTZkyRYMHD1bixIlVuHBhvfrqq5LkDGwXLlyoy5cvy9/fX6VLl9a8efP+9lgEAOB/jcPiwmiQAAAA+NeKFi2qtGnTasOGDZ4uJVY7ffq0cuTIoXHjxv2j2xcBAMBjjCkFAAAQzzx69EghISEubZs3b9aBAwdUpUoVzxQFAADiHW7fAwAAiGfOnz+vGjVq6JVXXlHGjBl15MgRTZ8+XenTp9frr7/u6fIAAEA8QSgFAAAQz6RMmVIlSpTQF198oatXrypJkiSqX7++xowZo9SpU3u6PAAAEE8wphQAAAAAAADcjjGlAAAAAAAA4HaEUgAAAAAAAHC7OD+mVFhYmC5cuKBkyZLJ4XB4uhwAAAAAAIA4zcwUFBSkjBkzysvr6f2h4nwodeHCBWXJksXTZQAAAAAAAMQrZ8+eVebMmZ+6PM6HUsmSJZP0+IVInjy5h6sBAAAAAACI2wIDA5UlSxZnJvM0cT6UCr9lL3ny5IRSAAAAAAAAbvLfhlFioHMAAAAAAAC4HaEUAAAA
AAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAt/NoKDVt2jQVLlxYyZMnV/LkyVWuXDmtXbvWufzBgwfq1q2bUqdOraRJk6pZs2a6fPmyBysGAAAAAABAdPBoKJU5c2aNGTNGe/fu1Z49e1StWjU1atRIhw8fliT16dNHK1eu1Ndff60tW7bowoULatq0qSdLBgAAAAAAQDRwmJl5uoiIUqVKpXHjxql58+ZKmzat5s+fr+bNm0uSjhw5ovz582vnzp0qW7bsMz1fYGCg/P39dfv2bSVPnjwmSwcAAAAAAIj3njWLiTVjSoWGhmrhwoW6e/euypUrp7179+rRo0eqUaOGc518+fIpa9as2rlzpwcrBQAAAAAAwL+VwNMF/PrrrypXrpwePHigpEmTaunSpSpQoID279+vRIkSKUWKFC7rp0uXTpcuXXrq8wUHBys4ONj5ODAwMKZKBwAAAAAAwD/k8VAqb9682r9/v27fvq0lS5aobdu22rJlyz9+vtGjR2v48OHRWGHskn3Aak+XEGecHlPf0yUAAAAAABBvefz2vUSJEil37twqUaKERo8erSJFiujDDz9U+vTp9fDhQ926dctl/cuXLyt9+vRPfb6BAwfq9u3bzp+zZ8/G8F8AAAAAAACAqPJ4KPVXYWFhCg4OVokSJZQwYUJt3LjRuezo0aP6888/Va5cuadu7+Pjo+TJk7v8AAAAAAAAIHbx6O17AwcOVN26dZU1a1YFBQVp/vz52rx5s9avXy9/f3917NhRffv2VapUqZQ8eXL16NFD5cqVe+aZ9wAAAAAAABA7eTSUunLlil577TVdvHhR/v7+Kly4sNavX6+aNWtKkiZNmiQvLy81a9ZMwcHBql27tj755BNPlgwAAAAAAIBo4DAz83QRMSkwMFD+/v66fft2nLiVj4HOow8DnQMAAAAAEP2eNYuJdWNKAQAAAAAAIO4jlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3M6jodTo0aNVqlQpJUuWTM8995waN26so0ePuqxTpUoVORwOl5/XX3/dQxUDAAAAAAAgOng0lNqyZYu6deumXbt2acOGDXr06JFq1aqlu3fvuqzXuXNnXbx40fkzduxYD1UMAAAAAACA6JDAk7983bp1Lo9nzZql5557Tnv37lWlSpWc7YkTJ1b69OndXR4AAAAAAABiSKwaU+r27duSpFSpUrm0z5s3T2nSpFHBggU1cOBA3bt376nPERwcrMDAQJcfAAAAAAAAxC4e7SkVUVhYmHr37q0XX3xRBQsWdLa3bt1a2bJlU8aMGXXw4EG9/fbbOnr0qL799tsnPs/o0aM1fPhwd5UNAAAAAACAf8BhZubpIiTpjTfe0Nq1a/Xjjz8qc+bMT13vhx9+UPXq1XXixAnlypUr0vLg4GAFBwc7HwcGBipLliy6ffu2kidPHiO1u1P2Aas9XUKccXpMfU+XAAAAAABAnBMYGCh/f///msXEip5S3bt316pVq7R169a/DaQkqUyZMpL01FDKx8dHPj4+MVInAAAAAAAAoodHQykzU48ePbR06VJt3rxZOXLk+K/b7N+/X5KUIUOGGK4OAAAAAAAAMcWjoVS3bt00f/58LV++XMmSJdOlS5ckSf7+/vLz89PJkyc1f/581atXT6lTp9bBgwfVp08fVapUSYULF/Zk6QAAAAAAAPgXPBpKTZs2TZJUpUoVl/aZM2eqXbt2SpQokb7//ntNnjxZd+/eVZYsWdSsWTO98847HqgWAAAAAAAA0cXjt+/9nSxZsmjLli1uqgYAAAAAAADu4uXpAgAAAAAAABD/EEoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbhflUGrOnDkKDg6O1P7w4UPNmTMnWooCAAAAAABA3BblUKp9+/a6fft2pPagoCC1b98+WooCAAAAAABA3BblUMrM5HA4IrWfO3dO/v7+0VIUAAAAAAAA4rYEz7pisWLF5HA45HA4VL16dSVI8H+bhoaG6tSpU6pTp06MFAkAAAAAAIC45ZlDqcaNG0uS9u/fr9q1aytp0qTOZYkSJVL27NnVrFmzaC8QAAAAAAAAcc8zh1LDhg2TJGXPnl0tWrSQr69vjBUFAAAAAACAuO2ZQ6lwbdu2lfR4tr0rV64oLCzMZXnWrFmjpzIAAAAAAADEWVEe6Pz48eOqWLGi/Pz8lC1bNuXIkUM
5cuRQ9uzZlSNHjig91+jRo1WqVCklS5ZMzz33nBo3bqyjR4+6rPPgwQN169ZNqVOnVtKkSdWsWTNdvnw5qmUDAAAAAAAgFolyT6l27dopQYIEWrVqlTJkyPDEmfie1ZYtW9StWzeVKlVKISEhGjRokGrVqqXffvtNSZIkkST16dNHq1ev1tdffy1/f391795dTZs21fbt2//x7wUAAAAAAIBnOczMorJBkiRJtHfvXuXLly/ai7l69aqee+45bdmyRZUqVdLt27eVNm1azZ8/X82bN5ckHTlyRPnz59fOnTtVtmzZ//qcgYGB8vf31+3bt5U8efJor9ndsg9Y7ekS4ozTY+p7ugQAAAAAAOKcZ81ionz7XoECBXTt2rV/VdzT3L59W5KUKlUqSdLevXv16NEj1ahRw7lOvnz5lDVrVu3cuTNGagAAAAAAAEDMi3Io9cEHH6h///7avHmzrl+/rsDAQJeffyosLEy9e/fWiy++qIIFC0qSLl26pESJEilFihQu66ZLl06XLl164vMEBwdHW00AAAAAAACIGVEeUyq811L16tVd2s1MDodDoaGh/6iQbt266dChQ/rxxx//0fbhRo8ereHDh/+r5wD+KW6vjD7cXgkAAAAAcVuUQ6lNmzZFexHdu3fXqlWrtHXrVmXOnNnZnj59ej18+FC3bt1y6S11+fJlpU+f/onPNXDgQPXt29f5ODAwUFmyZIn2mgEAAAAAAPDPRTmUqly5crT9cjNTjx49tHTpUm3evFk5cuRwWV6iRAklTJhQGzduVLNmzSRJR48e1Z9//qly5co98Tl9fHzk4+MTbTUCAAAAAAAg+kU5lNq6devfLq9UqdIzP1e3bt00f/58LV++XMmSJXOOE+Xv7y8/Pz/5+/urY8eO6tu3r1KlSqXkyZOrR48eKleu3DPNvAcAAAAAAIDYKcqhVJUqVSK1ORwO57+jMqbUtGnTnvicM2fOVLt27SRJkyZNkpeXl5o1a6bg4GDVrl1bn3zySVTLBgAAAAAAQCwS5VDq5s2bLo8fPXqkX375RUOGDNHIkSOj9Fxm9l/X8fX11dSpUzV16tQoPTcAAAAAAABiryiHUv7+/pHaatasqUSJEqlv377au3dvtBQGAAAAAACAuMsrup4oXbp0Onr0aHQ9HQAAAAAAAOKwKPeUOnjwoMtjM9PFixc1ZswYFS1aNLrqAgAAAAAAQBwW5VCqaNGicjgckcaDKlu2rL788stoKwwAAAAAAABxV5RDqVOnTrk89vLyUtq0aeXr6xttRQEAAAAAACBui3IolS1btpioAwAAAAAAAPHIPxrofMuWLQoICFDu3LmVO3duNWzYUNu2bYvu2gAAAAAAABBHRTmUmjt3rmrUqKHEiROrZ8+e6tmzp/z8/FS9enXNnz8/JmoEAAAAAABAHBPl2/dGjhypsWPHqk+fPs62nj17auLEiXrvvffUunXraC0QAAAAAAAAcU+Ue0r98ccfCggIiNTesGHDSIOgAwAAAAAAAE8S5VAqS5Ys2rhxY6T277//XlmyZImWogAAAAAAABC3Rfn2vTfffFM9e/bU/v37Vb58eUnS9u3bNWvWLH344YfRXiAAAAAAAADiniiHUm+88YbSp0+vCRMmaPHixZKk/Pnza9GiRWrUqFG0FwgAAAAAAIC4J8qhlCQ1adJETZo0ie5aAAAAAAAAEE8885hSN2/e1JQpUxQYGBhp2e3bt5+6DAAAAAAAAPirZw6lPv74Y23dulXJkyePtMzf31/btm3TlClTorU4AAAAAAAAxE3PHEp98803ev3115+6vGvXrlqyZEm0FAUAAAAAAIC47ZlDqZMnTypPnjxPXZ4nTx6dPHkyWooCAAAAAABA3PbMoZS3t7cuXLjw1OUXLlyQl9czPx0AAAAAAADisWdOkYoVK6Zly5Y9dfnSpUtVrFix6KgJAAAAAAAAcVyCZ12xe/fuatmypTJnzqw33nhD3t7ekqTQ0FB98sknmjRpkubPnx9jhQIAAAAAACDueOZQqlmzZurfv7969uypwYMHK2fOnJKkP/74Q3fu3NFbb72l5s2bx1ihAAAAAAAAiDueOZSSpJEjR6pRo0aaN2+eTpw4ITNT5cqV1bp1a5UuXTqmagQAAAAAAEAcE6VQSpJKly5NAAUAAAAAAIB/henyAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3O4fhVIhISH6/vvv9emnnyooKEiSdOHCBd25cydaiwMAAAAAAEDcFOXZ986cOaM6derozz//VHBwsGrWrKlkyZLpgw8+UHBwsKZPnx4TdQIAAAAAACAOiXJPqV69eqlkyZK6efOm/Pz8nO1NmjTRxo0bo7U4AAAAAAAAxE1R7im1bds27dixQ4kSJXJpz549u86fPx9thQEAAAAAACDuinJPqbCwMIWGhkZqP3funJIlSxYtRQEAAAAAACBui3IoVatWLU2ePNn52OFw6M6dOxo2bJjq1asXnbUBAAAAAAAgjory7XsTJkxQ7dq1VaBAAT148ECtW7fW8ePHlSZNGi1YsCAmagQAAAAAAEAcE+VQKnPmzDpw4IAWLVqkAwcO6M6dO+rYsaPatGnjMvA5AAAAAAAA8DRRDqW2bt2q8uXLq02bNmrTpo2zPSQkRFu3blWlSpWitUAAAAAAAADEPVEeU6pq1aq6ceNGpPbbt2+ratWq0VIUAAAAAAAA4rYoh1JmJofDEan9+vXrSpIkSbQUBQAAAAAAgLjtmW/fa9q0qaTHs+21a9dOPj4+zmWhoaE6ePCgypcvH/0VAgAAAAAAIM555lDK399f0uOeUsmSJXMZ1DxRokQqW7asOnfuHP0VAgAAAAAAIM555lBq5syZkqTs2bOrX79+3KoHAAAAAACAfyzKs+8NGzYsJuoAAAAAAABAPBLlUEqSlixZosWLF+vPP//Uw4cPXZbt27cvWgoDAAAAAABA3BXl2fc++ugjtW/fXunSpdMvv/yi0qVLK3Xq1Prjjz9Ut27dmKgRAAAAAAAAcUyUQ6lPPvlEn332maZMmaJEiRKpf//+2rBhg3r27Knbt2/HRI0AAAAAAACIY6IcSv35558qX768JMnPz09BQUGSpFdffVULFiyI3uoAAAAAAAAQJ0U5lEqfPr1u3LghScqaNat27dolSTp16pTMLHqrAwAAAAAAQJwU5VCqWrVqWrFihSSpffv26tOnj2rWrKkWLVqoSZMm0V4gAAAAAAAA4p4oz7732WefKSwsTJLUrVs3pU6dWjt27FDDhg3VtWvXaC8QAAAAAAAAcU+UQykvLy95ef1fB6uWLVuqZcuW0VoUAAAAAAAA4rYoh1KS9ODBAx08eFBXrlxx9poK17Bhw2gpDAAAAAAAAHFXlEOpdevW6bXXXtO1a9ciLXM4HAoNDY2WwgAAAAAAABB3RXmg8x49euill17SxYsXFRYW5vJDIAUAAAAAAIBnEeVQ6vLly+rbt6/SpUv3r3/51q1bFRAQoIwZM8rhcGjZsmUuy9u1ayeHw+HyU6dOnX
/9ewEAAAAAAOBZUQ6lmjdvrs2bN0fLL797966KFCmiqVOnPnWdOnXq6OLFi86fBQsWRMvvBgAAAAAAgOdEeUypjz/+WC+99JK2bdumQoUKKWHChC7Le/bs+czPVbduXdWtW/dv1/Hx8VH69OmjWiYAAAAAAABisSiHUgsWLNB3330nX19fbd68WQ6Hw7nM4XBEKZR6Fps3b9Zzzz2nlClTqlq1anr//feVOnXqaP0dAAAAAAAAcK8oh1KDBw/W8OHDNWDAAHl5RfnuvyipU6eOmjZtqhw5cujkyZMaNGiQ6tatq507d8rb2/uJ2wQHBys4ONj5ODAwMEZrBAAAAAAAQNRFOZR6+PChWrRoEeOBlCS1bNnS+e9ChQqpcOHCypUrlzZv3qzq1as/cZvRo0dr+PDhMV4bAAAAAAAA/rkoJ0tt27bVokWLYqKW/ypnzpxKkyaNTpw48dR1Bg4cqNu3bzt/zp4968YKAQAAAAAA8Cyi3FMqNDRUY8eO1fr161W4cOFIA51PnDgx2or7q3Pnzun69evKkCHDU9fx8fGRj49PjNUAAAAAAACAfy/KodSvv/6qYsWKSZIOHTrksizioOfP4s6dOy69nk6dOqX9+/crVapUSpUqlYYPH65mzZopffr0OnnypPr376/cuXOrdu3aUS0bAAAAAAAAsUiUQ6lNmzZF2y/fs2ePqlat6nzct29fSY9vEZw2bZoOHjyo2bNn69atW8qYMaNq1aql9957j55QAAAAAAAA/+OiHEpFpypVqsjMnrp8/fr1bqwGAAAAAAAA7vJMoVTTpk01a9YsJU+eXE2bNv3bdb/99ttoKQwAAAAAAABx1zOFUv7+/s7xovz9/WO0IAAAAAAAAMR9zxRKzZw584n/BgAAAAAAAP4Jr6hucOTIkacuYwwoAAAAAAAAPIsoh1LFixfX1KlTXdqCg4PVvXt3NWrUKNoKAwAAAAAAQNwV5VBq1qxZGjp0qOrVq6fLly9r//79KlasmL7//ntt27YtJmoEAAAAAABAHBPlUOrll1/WgQMH9OjRI73wwgsqV66cKleurH379qlUqVIxUSMAAAAAAADimCiHUuEePnyo0NBQhYaGKkOGDPL19Y3OugAAAAAAABCHRTmUWrhwoQoVKiR/f38dO3ZMq1ev1meffaaKFSvqjz/+iIkaAQAAAAAAEMdEOZTq2LGjRo0apRUrViht2rSqWbOmDh48qEyZMqlo0aIxUCIAAAAAAADimgRR3WDfvn3KmzevS1uqVKm0ePFiffXVV9FWGAAAAAAAAOKuKIdS4YHU3r179fvvv0uSChQooOLFi+vVV1+N3uoAAAAAAAAQJ0U5lLpy5YpatmypzZs3K0WKFJKkW7duqWrVqlq4cKHSpk0b3TUCAAAAAAAgjonymFI9evRQUFCQDh8+rBs3bujGjRs6dOiQAgMD1bNnz5ioEQAAAAAAAHFMlHtKrVu3Tt9//73y58/vbCtQoICmTp2qWrVqRWtxAAAAAAAAiJui3FMqLCxMCRMmjNSeMGFChYWFRUtRAAAAAAAAiNuiHEpVq1ZNvXr10oULF5xt58+fV58+fVS9evVoLQ4AAAAAAABxU5RDqY8//liBgYHKnj27cuXKpVy5cilHjhwKDAzUlClTYqJGAAAAAAAAxDFRHlMqS5Ys2rdvn77//nsdOXJEkpQ/f37VqFEj2osDAAAAAABA3BSlUOrRo0fy8/PT/v37VbNmTdWsWTOm6gIAAAAAAEAcFqXb9xImTKisWbMqNDQ0puoBAAAAAABAPBDlMaUGDx6sQYMG6caNGzFRDwAAAAAAAOKBKI8p9fHHH+vEiRPKmDGjsmXLpiRJkrgs37dvX7QVBwAAAAAAgLgpyqFU48aNY6AMAAAAAAAAxCdRDqWGDRsWE3UAAAAAAAAgHolyKBVuz549+v333yVJBQoUUIkSJaKtKAAAAAAAAMRtUQ6lzp07p1atWmn79u1KkSKFJOnWrVsqX768Fi5cqMyZM0d3jQAAAAAAAIhjojz7XqdOnfTo0SP9/vvvunHjhm7cuKHff/9dYWFh6tSpU0zUCAAAAAAAgDgmyj2ltmzZoh07dihv3rzOtrx582rKlCmqWLFitBYHAAAAAACAuCnKPaWyZMmiR48eRWoPDQ1VxowZo6UoAAAAAAAAxG1RDqXGjRunHj16aM+ePc62PXv2qFevXho/fny0FgcAAAAAAIC4Kcq377Vr10737t1TmTJllCDB481DQkKUIEECdejQQR06dHCue+PGjeirFAAAAAAAAHFGlEOpyZMnx0AZAAAAAAAAiE+iHEq1bds2JuoAAAAAAABAPBLlUEp6PKj50qVL9fvvv0uSChQooEaNGjlv5wMAAAAAAAD+TpRTpMOHD6thw4a6dOmS8ubNK0n64IMPlDZtWq1cuVIFCxaM9iIBAAAAAAAQt0R59r1OnTrphRde0Llz57Rv3z7t27dPZ8+eVeHChdWlS5eYqBEAAAAAAABxTJR7Su3fv1979uxRypQpnW0pU6bUyJEjVapUqWgtDgAAAAAAAHFTlHtKPf/887p8+XKk9itXrih37tzRUhQAAAAAAADitiiHUqNHj1bPnj21ZMkSnTt3TufOndOSJUvUu3dvffDBBwoMDHT+AAAAAAAAAE8S5dv3GjRoIEl6+eWX5XA4JElmJkkKCAhwPnY4HAoNDY2uOgEAAAAAABCHRDmU2rRpU0zUAQAAAAAAgHgkyqFU5cqVn7rs0KFDKliw4L8qCAAAAAAAAHFflMeU+qugoCB99tlnKl26tIoUKRIdNQEAAAAAACCO+8eh1NatW9W2bVtlyJBB48ePV7Vq1bRr167orA0AAAAAAABxVJRu37t06ZJmzZqlGTNmKDAwUC+//LKCg4O1bNkyFShQIKZqBAAAAAAAQBzzzD2lAgIClDdvXh08eFCTJ0/WhQsXNGXKlJisDQAAAAAAAHHUM/eUWrt2rXr27Kk33nhDefLkicmaAAAAAAAAEMc9c0+pH3/8UUFBQSpRooTKlCmjjz/+WNeuXYvJ2gAAAAAAABBHPXMoVbZsWX3++ee6ePGiunbtqoULFypjxowKCwvThg0bFBQUFJN1AgAAAAAAIA6J8ux7SZIkUYcOHfTjjz/q119/1ZtvvqkxY8boueeeU8OGDWOiRgAAAAAAAMQxUQ6lIsqbN6/Gjh2rc+fOacGCBdFVEwAAAAAAAOK4fxVKhfP29lbjxo21YsWK6Hg6AAAAAAAAxHHREkoBAAAAAAAAUUEoBQAAAAAAALfzaCi1detWBQQEKGPGjHI4HFq2bJnLcjPT0KFDlSFDBvn5+alGjRo6fvy4Z4oFAAAAAABAtPFoKHX37l0VKVJEU6dOfeLysWPH6qOPPtL06dP1008/KUmSJKpdu7YePHjg5koBAAAAAAAQnRJ48pfXrVtXdevWfeIyM9PkyZP1zjvvqFGjRpKkOXPmKF26dFq2bJlatmzpzlIBAAAAAAAQjWLtmFKnTp3SpUuXVKNGDWebv7+/ypQpo507dz51u+DgYAUGBrr8A
AAAAAAAIHbxaE+pv3Pp0iVJUrp06Vza06VL51z2JKNHj9bw4cNjtDYA/5uyD1jt6RLilNNj6kf7c7KPok9M7B8AAAAgOsXanlL/1MCBA3X79m3nz9mzZz1dEgAAAAAAAP4i1oZS6dOnlyRdvnzZpf3y5cvOZU/i4+Oj5MmTu/wAAAAAAAAgdom1oVSOHDmUPn16bdy40dkWGBion376SeXKlfNgZQAAAAAAAPi3PDqm1J07d3TixAnn41OnTmn//v1KlSqVsmbNqt69e+v9999Xnjx5lCNHDg0ZMkQZM2ZU48aNPVc0AAAAAAAA/jWPhlJ79uxR1apVnY/79u0rSWrbtq1mzZql/v376+7du+rSpYtu3bqlChUqaN26dfL19fVUyQAAAAAAAIgGHg2lqlSpIjN76nKHw6ERI0ZoxIgRbqwKAAAAAAAAMS3WjikFAAAAAACAuItQCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALdL4OkCAADA/4bsA1Z7uoQ44/SY+p4uAQAAwOPoKQUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdgk8XQAAAAD+vewDVnu6hDjj9Jj6ni4BAIB4gZ5SAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAt4vVodS7774rh8Ph8pMvXz5PlwUAAAAAAIB/KYGnC/hvXnjhBX3//ffOxwkSxPqSAQAAAAAA8F/E+oQnQYIESp8+vafLAAAAAAAAQDSK1bfvSdLx48eVMWNG5cyZU23atNGff/75t+sHBwcrMDDQ5QcAAAAAAACxS6zuKVWmTBnNmjVLefPm1cWLFzV8+HBVrFhRhw4dUrJkyZ64zejRozV8+HA3VwoAAAA8XfYBqz1dQpxxekz9aH9O9k/0iu59xP6JXjFxDAH/VKzuKVW3bl299NJLKly4sGrXrq01a9bo1q1bWrx48VO3GThwoG7fvu38OXv2rBsrBgAAAAAAwLOI1T2l/ipFihR6/vnndeLEiaeu4+PjIx8fHzdWBQAAAAAAgKiK1T2l/urOnTs6efKkMmTI4OlSAAAAAAAA8C/E6lCqX79+2rJli06fPq0dO3aoSZMm8vb2VqtWrTxdGgAAAAAAAP6FWH373rlz59SqVStdv35dadOmVYUKFbRr1y6lTZvW06UBAAAAAADgX4jVodTChQs9XQIAAAAAAABiQKy+fQ8AAAAAAABxE6EUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0SeLoAAAAAAADwvyv7gNWeLiHOOD2mvqdLcCt6SgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAtyOUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAAAAAAAAcDtCKQAAAAAAALgdoRQAAAAAAADcjlAKAAAAAAAAbkcoBQAAAAAAALcjlAIAAAAAAIDbEUoBAAAAAADA7QilAAAAAAAA4HaEUgAAAAAAAHA7QikAAAAAAAC4HaEUAAAAAAAA3I5QCgAAAAAAAG5HKAUAAAAAAAC3I5QCAAAAAACA2xFKAQAAAAAAwO0IpQAAAAAAAOB2hFIAAAAAAABwO0IpAAAAAAAAuB2hFAAAAAAAANyOUAoAAAAAAABuRygFAAAAAAAAt/ufCKWmTp2q7Nmzy9fXV2XKlNHu3bs9XRIAAAAAAAD+hVgfSi1atEh9+/bVsGHDtG/fPhUpUkS1a9fWlStXPF0aAAAAAAAA/qFYH0pNnDhRnTt3Vvv27VWgQAFNnz5diRMn1pdffunp0gAAAAAAAPAPxepQ6uHDh9q7d69q1KjhbPPy8lKNGjW0c+dOD1YGAAAAAACAfyOBpwv4O9euXVNoaKjSpUvn0p4uXTodOXLkidsEBwcrODjY+fj27duSpMDAwJgr1I3Cgu95uoQ4Iyb+n2D/RB/2T+zHPord2D+xG/sndmP/xG7sn9gvuvcR+yd6cQzFbnEluwj/O8zsb9dz2H9bw4MuXLigTJkyaceOHSpXrpyzvX///tqyZYt++umnSNu8++67Gj58uDvLBAAAAAAAwF+cPXtWmTNnfuryWN1TKk2aNPL29tbly5dd2i9fvqz06dM/cZuBAweqb9++zsdhYWG6ceOGUqdOLYfDEaP14rHAwEBlyZJFZ8+eVfLkyT1dDv6C/RO7sX9iN/ZP7Mb+if3YR7Eb+yd2Y//Ebuyf2I39435mpqCgIGXMmPFv14vVoVSiRIlUokQJbdy4UY0bN5b0OGTauHGjunfv/sRtfHx85OPj49KWIkWKGK4UT5I8eXIO+FiM/RO7sX9iN/ZP7Mb+if3YR7Eb+yd2Y//Ebuyf2I39417+/v7/dZ1YHUpJUt++fdW2bVuVLFlSpUuX1uTJk3X37l21b9/e06UBAAAAAADgH4r1oVSLFi109epVDR06VJcuXVLRokW1bt26SIOfAwAAAAAA4H9HrA+lJKl79+5PvV0PsY+Pj4+GDRsW6TZKxA7sn9iN/RO7sX9iN/ZP7Mc+it3YP7Eb+yd2Y//Ebuyf2CtWz74HAAAAAACAuMnL0wUAAAAAAAAg/iGUAgAAAAAAgNsRSgEAAAAAAMDtCKUAAAAAAADgdoRSAIB4jfk+AMRlYWFhLo8558Uuf90/iN04fmIfjqH/fYRSeCahoaHOf9+/f9+DlQBA9DEzORwOSdInn3yis2fPergiRBTxQvPRo0ce
rAR/J/xDGsdP7GNm8vJ6fLk/bdo0Xb16VQ6Hgw/WsURYWJhz/+zZs0fnzp3zcEX4q/BjJTAwUJKc1wyIHSIeQ7/99puHq8E/RSiF/yo0NFTe3t6SpGHDhmnRokW6evWqh6tCRH+9uORiM3YJDAzU+fPndeLECU+XggjCwsKcF5fHjh3TlClTVLduXV2+fNnDlUFyvdCcMGGCPvjgA12/ft3DVeGvwoPdlStXqnbt2po3bx7vQbFExHPcuXPnNHz4cNWrV083btwgmIoFIp7jBg8erK5du2rPnj26e/euhytDuPDz27p16zRgwABt2bLF0yUhgojH0LvvvqsWLVpo48aNHq4K/wShFP6WmTkDqaZNm2rhwoVKmzatEiVK5OHKEC7iRee+ffsUGBjItzixyNGjR9WxY0f169dPs2fPlkQ349gi/EJmxIgR6tevnxInTqzffvtNVatW1fnz5z1cHcL3T//+/TVhwgSlSJFCISEhLuvwodrzHA6Hli1bppYtW6pLly4qWrQo70GxRPgxNHToUHXv3l2ZMmXS3r17VaVKFV2/fp1gysMi7p8ZM2Zo1KhRqlatmpIkSeLhyhDO4XDo22+/VbNmzZQpUyalS5fOZTnHj2eFH0MDBw7U9OnTNWbMGOXNm9fDVeGfcBhHE57B22+/rdWrV2vLli1KnTq1JCkoKEjBwcFKkyaNh6uLvyJ+QzBkyBBt3bpVr7/+ul566SV5e3vzwcDDfv31V9WoUUMdO3ZUvXr1VKFCBUnS+fPnlSlTJpdbx+AZH330kQYPHqyVK1cqa9as+umnn/TRRx/pxo0b2rRpkzJmzOjpEuO12bNn66233tL333+vwoULS5KCg4MVHBysxIkTK0GCBBxHHnb16lXVrVtXLVu2VL9+/RQSEqJHjx4591mGDBn4IsuDPvzwQw0ZMkRr1qzRc889p6NHj+qdd97Ro0ePtG3bNqVOnZpjyM3Cr93MTL///ruaNWumiRMnqm7durpx44YuXLigLVu2KEuWLGrYsKGny43Xfv31V9WrV0/vvfee2rVr52w/ceKEcufOLcn1Whzud/DgQb300kuaPn26qlatqrt37+r69ev68ccfVapUKeXOnZvz2/+ABJ4uAP8bzp8/r4YNGyp16tTavXu3fvzxR33yySfKmjWrunTpopYtW3q6xHgp/E1w0KBB+vzzz7VgwQIVL15cCRL836HNm6VnnD59WgEBAXrllVc0atQoZ/uECRM0aNAgLVq0SI0bN+bDgAc9evRIe/bsUbt27VSlShVJUs6cOZUpUyZ1795dtWvX1vfffx/pm1HEnL+er06fPq06deqocOHC+v3337Vx40ZNnTpVqVKlUkBAgPr27Uvg4WF37tzR9evXVa5cOQUFBWny5MnasGGDduzYoYIFC2rcuHGqWbOmp8uMl8LCwnTo0CG1bt3a+aVInjx5lDVrVrVo0UI1a9bUhg0blDp1aq4V3CTikBgOh0MpUqRQokSJdPfuXW3ZskXz5s3Trl27nOH7rVu39Nprr3m46rgv4rVYxH109epVpUyZUgEBAXr06JFmz56tefPm6c8//9Tzzz+vtWvXctx4WGBgoG7cuKEXXnhBe/bs0fz587V27Vr9+eefKliwoD7//HPnl1qIvTiKEMlfO889ePBAwcHB2r17tzp06KBevXpp9+7datWqlTJkyKAZM2bo7t27dGH1kL179+rbb7/VihUrVKNGDXl7e+vYsWOaNm2ajhw5Ii8vL24Xc6Pw4+Drr79WwYIF9dZbbzmXjRs3TsOGDVO1atXUoUMHLVu2jNsnPChhwoRyOBzat2+fS3ulSpXUtGlTHT58WDVr1tSFCxc8VGH8cv36defF/Zdffqnr16/Lx8dHc+fO1ZAhQ/TSSy9p06ZNeuWVV5Q/f37NmzdPN2/e9HDV8cvZs2e1cOFCzZ8/XydPnpQk5ciRQ/ny5VPTpk2VN29e7du3T02bNtXNmzd1//59rV+/3sNVx19eXl4KCgpyOcc5HA4VKVJE7dq10/79+1WjRg3dv3/f2XMHMWfz5s3OMYk6deqkPn36KGXKlEqZMqXGjBmjatWqyc/PTx988IG2bNmirFmzco5zg/BA6vr167p586a8vb21du1aHTlyRIkSJVJYWJjefPNNFS9eXCtXrlShQoU0atQo7dmzRwsXLvR0+fHKkz7PlC9fXunSpVOJEiVUvXp1PXjwQCNHjtSFCxd07Ngx7d271wOVIqroKQUXEb8duHfvnu7du6c0adJo9OjRev/993X8+HH17dtXxYsXV65cuTRhwgRt3bpVvr6+9PbwkCRJkujBgwe6c+eOfv31V3366af67rvvFBoaqt69e+uXX35RgQIFPF1mvBF+HGzZskVeXl5Knz69JOnSpUv6/ffftXr1auXJk0djx45Vu3bt9OWXX6pp06aeLDleeFovgOrVq+vXX3/V3Llz9dJLL8nHx0eSlD9/frVu3Vq3bt1Sz5499dVXX8nPz8/dZccbW7ZsUbNmzbR3715NnjxZ8+bNU+3atTVgwABdv35dO3fu1Ouvv66aNWsqb9682r9/v3755Rfdu3fP06XHGwcPHlSDBg2UOXNm/fzzz2rYsKEmTJig7Nmza/78+Zo3b558fX3VvHlzJU2aVAkSJFDJkiXl7+9Pj1A3eNo5rnXr1ho8eLCmTZum119/3bkfcuXKpY4dO+rXX39V06ZNtWbNGvZRDDEz3b17V926dVP69Omd12kbN26Un5+fVqxYoZ9++knJkydXmTJlnNv9dQw9xAyHw6ErV66oVatWqlWrltKnT6/27dtr5cqVql+/vrp06aIDBw6ofv36ateunfLly6d79+5p8uTJSpEihafLjzcinuM2btyoBAkSKGnSpCpRooS2b9+ur776Svnz59eLL74oX19fhYWFqWDBgkqcOLGHK8czMeD/Cw0Ndf67S5cuVrNmTUuRIoW9+eabtnPnTjMze/DggXOdixcvWpEiRaxPnz5urzW+iriPwv3xxx/WsGFDy58/v/n5+dkbb7xhixYtshs3bljBggVt0qRJ7i80HgsLC7OwsDCrXLmytWnTxtlmZnb79m3nemfOnLGSJUta06ZNPVJnfBLxuFmxYoXNmzfPfvjhBzMzu3v3rjVp0sTKly9v06ZNs2vXrtm1a9esYcOGNmTIEJsyZYplz57dTp486any44Xg4GCrW7eupUmTxpIlS2a//vqry/J79+65rFunTh2rW7eu89hCzNq/f7/5+fnZwIED7c6dO/bTTz9ZggQJbO3atU9c/9atWzZkyBBLlSqVHTlyxM3Vxj8Rz3ErV660GTNmOK/bbt68ae3atbMqVarYuHHj7MGDB3bx4kULCAiwwYMH25w5cyx79ux2+PBhT5UfbwQGBlqmTJnM29vbPv/88yeuc+fOHTt9+rTVqVPHihYtao8ePXJzlfFXnz59LHfu3JYgQQKbNm2ay7KHDx+6PB46dKjlyJHDzpw5484S462I7/V9+/a1dOnSWdq0aa1YsWL2wQcfuKx77949O3funNWvX9+
KFStmISEh7i4X/wChFCJ5+eWXLV++fLZ+/XpbunSpZcqUyerXr2/Xr183M7PTp0/b1KlTrVixYtagQQPndnw4iFkRLzo3b95sS5YssYsXL5rZ42BqxYoVtnnzZucFzL1796xUqVL21VdfeaTe+Cr8OBg+fLglT57cvv/+e+eykJAQ5/K7d+9a8+bNbfLkyR6pMz56++23LWnSpPbCCy+Yw+Gwt99+28wefwh45ZVXrEiRIpYkSRLLnz+/5c2b18zMdu/ebTly5LDjx497svR4YfDgweZwOCx9+vRPDDLu3r1rH3/8sdWsWdOKFCni/JDwpLAe0efIkSPm7e1to0ePNrP/e71r1qxpgwYNsjfffNM++ugj5/rr1q2zxo0bW/bs2W3fvn0eqTm+evvtty1JkiRWoEABczgcNnDgQLt7965duXLF/vOf/1iePHksWbJklidPHitQoICZme3YscOyZ89ux44d83D1cVP48RIaGmrnzp2zYsWKWb58+ax27dq2YcMG53rh126TJ0+2cuXKWeXKlZ3nOD5Ux6zwfbR7925LkSKFZc2a1caNG2c3btxwWW5m9vXXX9vrr79uadKk4fzmJhE/Xx4+fNiKFi1q+/bts+3bt9uwYcMsa9asNnToUOc6s2fPtvLly9uLL77IMfQ/hNv34GLr1q367bfftG7dOmXLlk1ffPGFgoKC1LdvX6VKlUqhoaHy9fXV7t27VatWLY0ZM0YSg2m7Q/jr+9Zbb2nWrFmSpESJEmnQoEFq1aqVAgICJD0eA+zPP/9Uz549FRYWplatWnmq5HjHItyiUq1aNc2ePVvvv/++fHx8VKFCBeetsWamUaNGaf/+/Ro7dqwnS47TIp6Xjh07pk2bNmnz5s3KkiWLtmzZotatWysoKEhTp07Vl19+qRMnTmjHjh1KkyaNGjRoIEmaM2eO0qVLxyyjMcD+cktX165d1bRpUw0bNkzVq1fX8uXLVaJECefyR48eKUGCBMqcObPWrFmjBAkSKCQkxGViB0SvsLAwrV69WmFhYSpUqJCkx+9Fo0ePdk4CcOTIEf3+++86cOCAvvjiC+XOnVu1a9fWuHHjnLNTIWZEPMft379fP/74ozZu3KhixYpp8eLFeuONN3Tv3j29//77mjhxoq5evaq1a9cqY8aMqlOnjiRp0aJFypgxo3NmZUSfiPtn+fLlqlChgvbt26erV6+qdu3azmvoGjVqOM9j3bt3V7Zs2RQQECBvb2/OcW4Qvo9SpEihVatWadmyZVq4cKHu37+vHj16KEWKFM59GRgYqJCQEG3dulX58+f3cOXxQ/h1wowZM7RhwwZVrFhRxYoVk/R4chpfX1998skn8vLy0rBhw1S+fHk9evRI7dq14xj6X+LhUAyxzJYtW6xUqVJmZjZy5EhLmTKlrVu3zszMLl26ZPPmzbOwsDALDAx0bsO31DEr4jcEmzZtsjJlytjWrVvt2rVr1r17dytQoICNHj3arly5YmZmU6dOtbp16/INgZscPnzYunfvbidOnLCbN2+6LJs+fbo999xzVrhwYZsxY4adOXPGVq9ebZ07d7ZkyZLxLVsM+es3/qNGjbK2bdtax44dXc5Xy5cvtwQJEliPHj3szp07Ltts27bNunfvbilSpLD9+/e7pe74JOJ+OH36tJ06dcr5+P79+1arVi3LlCmTy2s/cuRIO3funPMx5zX3uH79ur311lvm7e1tmzZtsilTpljKlClt1apVZmYWFBRkb775pqVJk8Z5CxjXBTFr7969Lo/HjBljHTt2tE6dOrlcM8ydO9eSJk1qffr0sbNnz7pss3v3buvVq5f5+/tzjosBEfdD//79LVOmTPbOO+84b+M/ffq0FStWzGrXrm1r1qyxR48eWfny5e29995zbsc5LmaF76MrV67YzZs3Xa7hevXqZcWLF7eRI0c627/88ks7fvy43b9/3wPVxm/Xr1+3zp07W9q0aa1Ro0Yuyy5dumSjR4+2bNmy2VtvveWyjGPofwehVDz2pAN106ZNljFjRuvfv7+lTZvWVq9e7Vy2bNkya9y4sR09etTZxi177jNnzhzr1auX9e3b16W9X79+VqBAARszZozdu3fPjh49anPnznXuX8YjiDkhISFWqlQpczgcVq1aNWvbtq0tXLjQ5diaM2eOVa1a1by9vS1p0qSWK1cuq169uh08eNCDlcddL7/8srVs2dKlbdy4ceZwOKx48eLODwTh567ly5ebj4+PtW3b1oKCgpzbfPPNN1anTp1IYxsheg0cONAKFSpk/v7+1q9fP2dQGxwcbLVr17bnnnvOpk6dalWqVLGCBQtygekhN2/etN69e5vD4TBvb2/76aefzOz/riMWLlxoWbNmtT/++MOTZcYLr732mnXt2tWlbfjw4eZwOKxw4cJ26dIll2Xz5s2zFClSWKdOnVyWzZ8/35o3b845LoZNmDDBUqdObXv37nV+oRt+3Jw5c8bKli1rBQsWtLx581rBggUtODjYk+XGG+HXAMuWLbNy5cpZrly5rFSpUi7jE/Xu3dtKly5tr776qvXs2dMcDof99ttvnio5XnnS58vDhw9br169LGnSpDZ16lSXZZcuXbJBgwZZ48aN+Wz6P4pQKp6KeGE/YsQI+/TTT52P27VrZw6Hw+bMmeNsO3/+vBUrVsy6devm1jrxf+rXr28Oh8OqV6/uMuC82eNgqmDBgvbOO+/Y3bt3ne18gIt5s2fPtqFDh9ry5ctt/PjxlixZMnv11Vft3Xffdb4x3rlzx3bv3m3ffPON/fbbb5F6VCH63Lt3z3lRf+nSJec++OKLL8zhcNjo0aOdbeH/XbRokVWsWDFS746IIRWiR8TXeO7cuZYlSxabP3++TZw40XLkyGEtWrSwH3/80blOq1atrGzZsla/fn3GkHKToKAgO3v2rK1bt862bt3qbL97964NGzbMvLy8bMWKFWb2f/uiX79+VqFCBefYk4g5586dc57jIoaAn3zyiTkcDhs1apTLpBpmZp9//rnVrFkz0oc1znExKzg42Jo3b+4MOsKPl4jXZufPn7eZM2fa1KlTnV8i8mWie6xbt858fHxs/PjxNn36dBs+fLiz93S4ESNGOCdDOXDggAerjT8ivsdfvnzZJUw/ffq09ezZ0/LmzWvTp0932e7GjRuRru/wv4NQKh6KeKA2atTIihQpYnPmzLFr166ZmdmhQ4esevXqlixZMhs0aJD17t3bChYsaPXr13/icyD6Pe317dSpk2XKlMk+//zzSLcbde7c2Vq3bs2+cbMtW7ZY7ty5bffu3WZmdurUKfv000/N4XBYhQoVbOTIkQwg6yYRL/Q//vhjy5Ejh+3du9d5THz00UfmcDhs3LhxT71wCQ0N5Rhyg+3bt9tbb71ls2fPdrZt2rTJihQpYi+//LJt27bN2X7hwgXnPuHDWsw6cuSINW3a1AoXLmze3t7OXqDr16+30NBQe/DggfXu3du8vb1tyZIlZmb2zjvvWOLEibkFzA0izgA2Y8YMK168uH333XfOtrFjx5rD4bCxY8dGCq
bCcY5zn6CgIMuePbsNGjTI2Rb+2ofPEBaxzYwvE90lNDTUectrRMuWLTNvb2+XmauDg4MjXXMjZkQ8Ft59910rWrSo5cyZ04oWLWrr1q2zsLAwO3PmjPXs2dPy5ctnn3322d8+B/53EErFY3379rX8+fPbhQsXnG3hFzzXrl2zwYMHW5UqVax169Y2cuRI5zp8Sx2zIr6+Z8+etYsXL9rly5edbS+//LLlz5/fZs6c6dIrysz4hsBD+vbta1WqVHHuj5YtW1ru3Lnt9ddft1q1apnD4bD33nsv0pTCiD5/PS/dvn3bsmfPbqVKlbJ9+/a5BFPe3t42YcIEjhMPCAsLs8OHD5ufn58lSJDAxo0b57I8PJhq1aqVy8yV4dsi5uzfv9/Sp09v3bp1sxUrVthvv/1mixcvtpw5c1qWLFlsyZIlFhYWZnfu3LF+/fqZr6+v1alTx5IkSWJ79uzxdPlxXsT//3/66Se7evWqFSxY0OrUqeMyi9sHH3xgXl5eNn78eLt165YnSo2XnnRtfO/ePWvZsqW9/PLLdv78eZdlP/30kzVr1szlGhzu8/DhQ3vxxRetffv2zrbwQLB///5WtWpVu3nzJu87HjJixAhLnTq1zZ4925YsWWLNmze35557zhlCHTt2zPr06WMpUqSwZcuWebhaRAdCqXjkr9++1K5d26ZMmWJmZrt27bIJEyZY3rx5rXLlys7puP86mB+BVMyK+OY3dOhQK1WqlKVNm9aqVKliEydOdC576aWXrECBAjZ79uxI3e95A405f/75p33xxRf22WefufTk2LRpk9WuXdsuX75sbdu2tXTp0jnHHbh27Zp9+umnzgGAEf0inpe2bNliv//+u5k9/pY6T548Vrx4cZdgasqUKeZwOGz+/PkeqTe+eVJYvnTpUsuQIYM1bNgw0rGxefNmS58+vQ0ZMsStdcZnBw8etMSJE9s777wT6T3k2rVr9vzzz1v+/Pnt5MmTZmZ269Yt5weCvw66jegXcZ8MGTLEkiZNanfu3LE//vjDihQpYjVr1nQJpsLH0Zs3b54nyo13Ir4HHT161Pbt2+e8TX/16tWWMGFCe+edd5zHz7Vr16xRo0ZWs2ZNrqs9aMyYMfbCCy/YL7/84tL+wQcfWNGiRRnfy03OnDnj8vjGjRtWsmRJl6FlzMx69OhhqVOndo7J+uuvv9rkyZPpXRhHEErFExEvaN588007cOCAtW/f3goVKmRvv/22lS5d2gICAmzChAlWrFgxq1SpUqTtCDtizl9f2xEjRliqVKns22+/tZkzZ9rbb79tPj4+NnToUOc6rVq1slSpUtmaNWvcXW68dODAAcuWLZuVLl3aUqdObbly5bLFixc7l1eqVMm8vLwsS5YsjDvgRhGPnQEDBlixYsXs888/d34gCAoKsty5c0cKpr7++mtuBXODiB+4bt68aYGBgc4LyMWLF1umTJnsP//5jzNIDLdv3z4uNN3k3Llz5nA4rE2bNs628OMkfB8cP37cOYtbuBs3bjhv+4d77N2719q1a+cy1tfTgql58+ZxjnODiO9BgwcPtvz581u2bNksT548NnjwYAsNDbXZs2dbhgwZrFSpUla6dGkrVaqUFS5cmHHy3CT89b127ZpLz7Sff/7ZKlSoYB06dHAJpnr16mV16tThlj03aNSokcvdOGaPxwPNli2b8xo7YgeJsmXL2muvvRbpebhe+N9HKBUPRHyzGzx4sBUsWNBOnjxpGzdutC5duliBAgXsq6++suPHj5uZ2YcffmgtW7bkYsZN/noxcuvWLatWrZrLfdJBQUE2bdo0S5o0qcs3n8OGDeNE7AYHDhywxIkT24ABA+zu3bu2YcMGy5Qpk9WvX985bseuXbssb968NnPmTM8WG08NHTrU0qRJY5s3b450W2tgYKDlyZPHSpUq5Zw1LBznuZgT8dw2duxYq1atmpUpU8Zq1KjhnJ4+YjAV3kM3Is5v7lGwYEErVKiQbd++PdJrHn6MvPzyy1a/fv1IE23APRYvXmwlS5a0okWLOm/pDw81/vjjDytatKjVqVPHVq1a5bId5zj3GDt2rKVLl84ZDDZv3tzSpk3rHG/yxx9/tM8//9x69+5t06ZNY1DzGDZ37lyXAOqbb76xfPnyWZYsWaxEiRI2ZcoUCw4OtlWrVlnFihUtd+7c1qRJE2vUqJElT56cMfLcZP369c4eaREnyqhYsaLVrFnT+Th8nTZt2kQaBwxxA6FUPLJ69Wp75ZVXXAbFNHt8z3u4a9eu2QsvvGBvvfWWu8uLl5o1a2YdO3Z0abtx44alTZvWxowZ49J+8+ZNa9SokfXp0yfShwY+uMWcP//809KkSWMvvfSSS3upUqXs+eefd47ZcefOHStbtqwNHDjQzOhZ6E7Hjx+3IkWK2Pr1683s8WwtP//8sw0ZMsQWLVpkZo+DqWTJkrmMHwH3GDRokKVNm9ZmzpxpmzZtskyZMlmhQoWcvWwWL15s2bJls1atWkXqxo+YFfG9o2TJkpYnTx7bvn27M1CMeB4LCAiwevXqub1GPLZw4UKrWLGi+fn52aZNm8zs8f4JDzX++OMPy5Ahg/Xu3duDVcYPp0+fdv47LCzM7t+/b3Xr1rVp06aZ2ePr7eTJkztnBwsODn5ibyiu3WLGjRs3LFmyZFaxYkW7du2a7dmzx1KkSGEjRoywlStXWrt27ax48eLWq1cvCw4Otv3799vHH39sjRs3tn79+jHcgptEfH+ZNGmSvfLKK/brr7+amdl3331n+fLls3bt2rls8+KLL1q/fv3cWifcg1Aqnpg9e7ZlzpzZ0qZNazt27DAz1zfDixcv2vz5861gwYIWEBDgbOeDdcw6dOiQM/2P+Fp36tTJmjZt6uy9Fq5du3bWuHFjt9YY3506dcpKlSplDRs2dE5VP2rUKHM4HM7bXtu2bWuLFi2ymTNnmsPhYIyVGPbXi/tr165ZgQIFbOLEibZr1y5r27atFSpUyEqWLGkOh8NmzZplZo8DeD4EuNfp06etRIkSzsBw5cqV5u/vb5988onLejNnzrTGjRtzG4sHROypUbJkSXv++eddgqnQ0FC7ePGi1a9f37nfuDaIWU87Dr777jsrX768VapUybZv3+5sD9+HFy5c4BwXwxo1amTvvfeeS1tQUJCVLl3aTpw4YZs2bbKkSZM6A6kHDx7Yxx9/bPv27fNEufHW0aNHLVeuXFazZk1bvHix9e/f32X5uHHjrEiRIvb5558723j/8YyQkBBbsmSJpUuXzrp3724nT560kJAQmzFjhuXKlcsKFChgLVu2tLJly1r+/PnpXRhHeQlxUlhYmMvj1157Ta1bt1ZISIi+/PJL3bx5U97e3s71jh49qp9//lm1a9fWihUrnM/hcDjcXnt88sILLyhRokSaMmWKihYtqtDQUElSlSpV9Ntvv2nGjBk6duyYJCkoKEinT59Wrly5PFlyvJM9e3bNmzdPDx8+1NixY9W5c2dNmjRJixcv1tdff60OHTooV65c6tatm6ZPny5J8vf393DVcVdYWJi8vB6/d
f300086deqUkiRJosqVK2v27NmqUKGCUqRIoTFjxmj37t2qX7++fv/9d5mZ/Pz85O3t7TzOEPNu3Lihc+fOqVatWlqzZo1atWqlDz74QG+88Ybu3LmjKVOmKCQkRO3atdPSpUvl5eUV6f0LMStBggQKCQmRJP38889Knjy52rVrp127djmPtylTpujUqVOqV6+eJHFtEIMinuO++eYbffrppxo+fLiuXbummjVr6t1335Wvr6/ef/997dq1S9LjfRgaGqoMGTJwjoth3bt3V//+/SVJ169flyQlTZpUadOmVePGjRUQEKApU6aoa9eukh6fA7/++msdOHDAYzXHNyEhIXr++ee1du1anTx5Ui1atNCJEydc1unXr5/y5cunL7/80tkWftwhZq1evVp//vmnJGnQoEEaOXKkmjVrpilTpmjp0qUaN26czp8/rw4dOmjlypWqVKmSUqRIoapVq+rgwYMu71mIQzydiiFmvffee87bV8zM+vTpY0WLFrWRI0c6x8IJd+nSJee/+bbAfUJDQ23Hjh2WOXNmq1KlisvU9YUKFbLChQtbnTp1rEyZMlawYEHnNwR8U+1eR48etZo1a5qvr2+kaezNzK5evWqLFi1yzq6D6BfxvDRw4EArWbKkzZ0718we9/Y8cOBApF5q5cqVe+L+QvR70jnp7t27VqtWLXvzzTctadKkLmPlHTp0yGrVqmWbN29+6vaIXn/3Gv+1x1SePHls//79NmTIEEucOHGkGaoQs9566y3LnDmzNWzY0IoUKWJZsmSxJUuWmJnZihUrrHbt2la/fn3bsmWLhyuNnyZPnmytW7d2zgS2fft2K1KkiJUqVcq5zu3bt61u3bpWsWJFerDFoPBrg8DAQGfb3r177ebNm3b8+HErWrSo5cqVy44cOeJyDpwzZ44VKFDAOTEKYt61a9esSpUqljFjRuvYsaP5+Pi4vLeEjzPZtWtXO3bs2BOfg2MpbiKUisOOHz9uNWvWtDJlytiKFSuc7d27d7eSJUvaqFGjIgVTZnwwiGlbtmyxlStXmpnZ66+/bsOHDzczs927d1uOHDmsQoUKznU3btxoU6dOtU6dOtmoUaMYGNPDTpw4YbVq1bK6devatm3bnO3hg83CPYYMGWJp06a1DRs2uFyEhgsMDLRDhw5ZnTp1rGjRohwvbhAxMBw9erTzPScoKMheeeUV8/HxsR49ejjXuXfvntWrV8/q16/PlyBuFP7+/v3339uuXbsivfYRj5WyZcuaw+GwZMmScUuym82dO9cyZMjgDDy+//57czgcLtdyq1atshIlStibb77pqTLjtfnz51u6dOnsjTfesGPHjlloaKh9+umnliNHDnv++eetZs2aVrZsWStatKjzGoEP0zHn/PnzVr9+fVuzZo0tW7bMHA6H7dq1y8wefx7KmTOnVaxY0Q4fPuzcH507d7bSpUszy56bHTt2zNKnT2+JEiWy1atXm5nrDHuLFy+2LFmyWLdu3ZjNOh4hlIpDnnRhv337dmvZsqWVKVPGli9f7mzv2bOnlSxZ0gYMGOByIkDMCQsLs+vXr1v58uWtbt261qxZM0uSJInLCTdiMPW0cJCLGs86duyY1alTx2rXru0cYwru8/vvv1v+/PmdEzZcu3bNfvnlF5swYYItW7bMzMxmzJhhAQEBVrVqVT4MuEHE955jx45ZjRo1LFGiRLZx40YzezxZQLly5axMmTLWuXNnGzlypFWqVMkKFSrElOgesG3bNnM4HLZkyZInHhcRg6kmTZrQQ8oDxowZY507dzYzs3nz5lny5Mmd43ndvn3bGcZv27aNY8cNnvYaL1myxDJnzmydO3d2Dn5+9OhRe+utt2zIkCE2bdo05zHGlyMxa9++fdasWTN74YUXzNfX1+bPn29m//e6Hzt2zHLmzGlZs2a12rVr2xtvvGFp0qTh/OZG4Z9rjh8/bqVLl7bSpUtbrly57MSJE2b2eEKA8HW+/vpr8/b2pqd7PEIoFQf99fah7du3W4sWLaxMmTIuUwW/+uqrNnToUHeXF+8dP37ccuTIYV5eXjZlypRIy3fv3m05c+a0KlWqcBETSx07dswaNGhgZcuWtZ07d3q6nHjl7NmzVqRIEZsxY4Zt377dOnTo4JzOPmHChLZs2TK7du2arV27lg8DbjZgwAArV66cNWjQwFKmTGk+Pj7O95xTp07ZO++8YxUqVLDGjRtbr1696PnpAUePHrVVq1bZqFGj/nY99olnhIcfHTt2tPbt29uuXbssWbJkLhMDTJw4MdJA2wRTMSfia3vkyBHbvXu3Xbx40XmMLFq0yBlMHTly5InPwZci7hE+2UyePHmcdySY/d/rHx6GOBwOW7t2rcssiog5fz0/hYSE2J07d+zgwYNWs2ZNy5EjxxOHvti5cyfHTjxCKBXHjB8/3sqUKePsshpu27ZtVrFiRStevLhzFqSIuGXPPUJDQ+3IkSNWpUoV54xuEXuwhdu9e7f5+fnZf/7zHw9UiWfx+++/W/PmzZnCPgY96YPWjRs3rGnTpla8eHHz8vKynj172urVq+3mzZtWo0aNSB+2+bDmHnPmzLHEiRPbzp07LTAw0A4ePGht27a1hAkTOoOp0NDQSO81XHC6z6VLl8zPz8+8vLxs0KBBni4H9vTz07Zt2yxbtmzmcDhsxowZzva7d+9a/fr1rXfv3u4qMV6LeL4aMGCA5cuXzxInTmzFixe3Nm3aOO80CA+munbtaocOHXJuw/uPe4S/j6xcudKmT59ubdq0sQoVKriMqRu+zpEjR6xIkSJ29uxZj9Qa30Q8Bvbv32+HDh2yw4cPO9t++uknq1WrluXKlcs543irVq1s/PjxznW4TogfHGZmnh5sHdFn4cKFmjlzphIlSqR33nlHZcqUcS4bN26chgwZoowZM2rx4sUqWbKkJMnMmEknBkWcSSeiQ4cOqUePHvLz89Mbb7yhgIAA57LQ0FAdP35cefLkkbe3tzvLRRQ8fPhQiRIl8nQZcVLE42b16tU6ffq0kiVLpsqVKytDhgz65ZdfJMnlHFeuXDm1aNFCvXv39kTJ8dqIESO0Y8cOrVu3ztl26dIl9ejRQ6tWrdLatWtVpUqVp54PEfMePXqkpUuXql+/fipatKhzpl2uATwj4rGwatUqnTx5UsmTJ1fhwoVVokQJ9erVS+vXr9frr7+uTp066dixY3rnnXd06dIl7d69WwkSJGDfucnEiRM1cuRILViwQGnSpNHWrVs1b948+fv7a9WqVfL19dU333yj3r17q0GDBurfv79y5Mjh6bLjvPD////6vrJr1y5NmjRJFy5cUK9evdS8eXNJ0vr161W2bFklTpxYCRMm9FTZ8UbE89PQoUO1ZMkS3bt3T4kSJVKvXr3UrVs3SY9nfR06dKg2bdqkokWL6vLlyzp27Bj7KL7xYCCGf+lpyfHy5cutZs2aVq9ePZdbi+bMmWOtW7e2OXPmuKvEeC/iNwSHDh2yzZs325UrV5zfru3evduqVKliAQEB9u2335qZWdWqVfmGAPj/wmegqlOnjlWo
UMHy5s1rCxYscC6/e/eunThxwurUqWPFihXjtiM3CD+vRTy/jR8/3tKkSWO3bt0yM3MZF8LhcJiPjw+z7LnZk17nkJAQW7x4sfn5+dkbb7zxt+vCPd566y3LmjWr1ahRwwICAix16tT2/fff25kzZ6x3796WLl06S5EihRUuXNiqVavGOHluEPF4CAkJscaNG9v777/vbHv48KGtXLnSihUrZu+++65z/aVLl1q6dOlsxIgRkZ4H0Sv8td2yZYsNGjTIunfvbvPmzXNeA+zcudNatGhhFSpUsEmTJtm7775rDoeDHlIeMHz4cEubNq1t3LjRzpw5Y506dTKHw2EffPCBc53z58/btGnTbPTo0dzaH08RSv2PingxMmPGDJs0aZKNGzfOOZve2rVrrWbNmla9enVbtGiRHT582EqWLOkyLgFvljEr4us7cOBAy58/vyVLlswqVapkQ4cOde6r3bt3W+3ata1AgQKWN29ey5s3rwUHB3uqbCDWmDdvnmXMmNF27NhhZmZTpkwxHx8fW7x4sXOdqVOnWq1ataxy5cp8WHODBQsWWPv27e3o0aMuMxbt3bvXSpYsaW+++aZdunTJ2b5z507r2rWrde3a1XLmzMkHAjcJf//ZunWrjRs3zrp162bbtm1z7ptFixaZr6+vdevWLdI2cJ8FCxZYxowZnV8gTps2zRwOh3311Vdm9jh0v3Dhgq1Zs8YOHTrkDIL5sBZzIh4Hu3btsvv371v16tXtlVdeibRu27ZtrV69ei5tY8aMsTx58jxxdmtEr2+++caSJUtmr776qtWuXdvKly9vXbt2dV4L7N6927p06WL58uWzF154wfbs2ePhiuOfX375xapXr24bNmwws8ezhqZIkcKaN29uDofDxo4d+8TtuI6Lfwil/scFBARYzpw5rUyZMpY+fXrLmzevrVmzxszMNmzYYG3atDFvb2/LlSuXBQQEOLfj4tN93nvvPUufPr1t2LDBHjx4YC1atLCMGTNat27d7ObNm2Zm9ttvv9mCBQts8uTJfEMA/H+DBw+2tm3bmtn/XXxOnz7dzMzu3Llj58+ft+vXr9s333zDoOZucPv2bcuVK5elTZvWChUqZB07drSZM2c6l0+cONFKly5tHTp0sAMHDtiRI0esfv361qFDB9u8ebOlS5fOvv/+e8/9AfHMkiVLLHHixFa3bl174YUXLHPmzNahQwfnYMyLFi2y5MmT22uvvebhSuOvoUOHWqdOnczM7Ntvv7WkSZPap59+amZmgYGBTxw4m3GKYs5fx5AqW7asHT9+3AYMGGAVK1a0ffv2ubz+EydOtEqVKtndu3ed244bN84KFCjg7DWKmLFr1y7Lnj27ff7552b2eAKalClTWsaMGa1Vq1bOYOrq1at2+fJlu3LliifLjTf+en66dOmSjRs3zu7fv2+bNm2yjBkz2rRp0+zBgwfWsGFDczgcTLoFMyOU+p82evRoy5Mnj129etXu3btnDx8+tDp16ljevHmd3wZcvXrVfvnlF9u2bZtzOy5oYlbEi5qjR4+6zHq4YcMGS5IkiTVs2NDy5s1rvXv3fuK3aXxDgPjmSeelIUOG2HvvvWffffedJU2a1BlIhYaG2ty5c238+PEuxwrHTcwKCQmxgQMH2vTp023v3r02btw4S5EihbVo0cI+/PBDCwkJsQkTJliDBg3M4XBY7ty5rVChQmb2+MI0T548tmXLFg//FfHDyZMnLXfu3PbZZ58535M+//xzq1atmnXu3NmuXbtmoaGh9tVXX1nGjBnt4sWLHq44fnrvvfdswIABtnz5ckuaNKlNmzbNzB5fRyxYsMDef/99CwoK8nCV8c+xY8esRo0azluOz5w5Yzly5LCAgADbtm2bPXz40AIDA61q1aouoW5wcLB9/vnnLgM5I2bMnTvX2Xvt1KlTljNnTmvXrp2NGzfO0qRJY507d3YGU3CPiNdxW7ZscQaB9+7dMzOzrl27WpcuXezBgwdmZtarVy+rUKGCVa5cmc4SIJSK7f7uIO3Zs6e9/PLLZmYuJ97SpUtbjRo1nrgNgVTMiri/Dh48aLdv37bFixfbjRs3bOvWrZYuXTr77LPPzMysdu3aliZNGmvTpg0XnYjXIp6XNm3a5OxBOGvWLHM4HJYwYUKbNWuWc53AwECrUaOG9e/f392lxntr1qyxZMmS2YEDB8zM7P79+zZkyBBzOBz24osv2tixY23nzp32888/2y+//OLct2+++aYVLFiQ8COG/PVaYf/+/S63hYX79NNPLVOmTPbLL7+Y2eOgMTAw0F1lxltPu/b67LPPzN/f33x9fZ2BlNnjXom1atXiHOcBY8aMsTJlyljNmjXt6tWrzvZjx47ZCy+8YEWKFLEcOXJY6dKlrVChQs7r7yeNtYeYdeDAAXv06JHVqlXL2as6KCjIcufObT4+Pta+fXvPFhiP/HXIksKFC9v48eOdw5E8ePDASpcu7RzL8N69e9akSRNbunTpE58D8Q9T4MRiFmHWgrVr1+rDDz/UsmXLnMtv3rypc+fOSZISJkyo+/fvS5IGDhyoo0eP6sKFC7K/TK7IrEcxJ+L+evPNN9WjRw/dvn1bAQEBSpkypebOnatmzZqpffv2kqT8+fMrZ86cSpcunRInTuzJ0gGPMTPneWnQoEHq0KGDvv76az169Eht27ZVv3795HA4lC5dOh07dkxHjhxR8+bNdePGDY0cOdLD1cc/devW1auvvqpPP/1UkpyzTjVq1EilS5fW999/r/Lly+vw4cMqWrSofvzxR/3nP//Rl19+qa+++krp06f38F8Q90R87/ntt9/04MEDJUiQQF5eXrp3756kxzOFSlKXLl2UMGFC57WEt7e3kiVL5pG644uIM4MtXbpUCxcu1PLlyyVJnTt3VsuWLfXo0SNlyJBBv/32m37//Xe99NJLunbtGuc4D6hUqZIOHjyoH3/8USdPnpT0+BjLkyePNm7cqPfee0+dO3dW165dtW/fPiVMmFAhISHOfcx1dvQLCwuTJD148EAPHjxwthcuXFinT5/W2bNnndfWd+/eVfHixTV27FgNHz7cI/XGJ+H7Jvw96P3339enn36qadOmqX379s4Zqn18fNSyZUt99tlnevXVV1WpUiWdOnVKDRo0kMQssJASeLoAPFnEg3PYsGFauXKlihYtKh8fH+c6//nPf9SgQQO9++67evfdd+Xn5ydJCg4OVurUqZUoUSIOcDcKf63PnTunffv26b333lOWLFmcy69evSrp8YcASbpw4YJ69OihNm3aPHFKWyA+CD9uRo8erc8//1zLly/X888/75wK+K233tLNmzf18ssvy8/PT1myZFGSJEm0a9cuJUiQQKGhoc5jCu5RvHhxzZw5Uzdv3lT16tWVMmVKzZ49W8mTJ9f58+e1bds25xTcvr6+8vb21vbt25U/f34PVx73/PHHH3rzzTe1dOlSffvtt+rXr59WrlypF154Qfnz51ePHj20bds2pUqVSpJ0//59ZcqUSVmzZvVw5fFDxND9zTff1KxZs5QyZUo9ePB
Ay5Yt08yZMzV9+nTdunVL3bt3161bt/TCCy/I19eXc5wbPOm6q1y5ctq+fbtefPFFffDBB5o8ebLzeEmXLp0CAgIUEBDgXD80NFQJEvBxKrrt3LlT+fLlU8qUKeXl5aUVK1boyy+/1NWrV9W5c2c1atRIKVOmlJ+fn0JCQrR69WoVLFhQU6ZM0cWLF9WqVSulTZvW039GnBfx+Ll27Zo2btyoiRMnqnz58s728OPs1VdflZeXlzZu3KhSpUrpww8/5ByH/+OxPlp4JkOGDLHUqVPbjh07nPfghgsMDLQxY8ZYzpw5rVevXvbbb7/Zjh07rFChQnRZ9ZAPPvjAypUrZ3Xq1LFr166Z2ePuqKGhoTZixAgrXbq0c4aQAgUKOMfAobs34rOgoCCrWbOmTZky5anr/PTTT/bDDz/Yzz//zAxUsUCpUqXM4XBY5cqV7fr1609cJ3z/MJtozNm9e7elSpXKihcvbg6Hw+bOnetcduHCBStWrJjlzZvXli1bZt99950NGjTIUqVKZcePH/dg1XHfX29DuXTpklWqVMl+/fVX++OPP2zBggXm7+9vLVu2dK6za9cu++GHH+zAgQOc49wg4nXXtm3bbOnSpXb48GHnLXs7duwwX19fa9Wqlf35559P3A7RLywszH7++WdzOBz2/vvv28OHD23btm2WNGlS69q1q73yyivm7e1tffr0sT///NNCQkJs+PDhliNHDsuUKZOlT5/e9u7d6+k/I8575ZVXbNSoUS5tFy9etDRp0rhMgBLuwYMHzlvFI14TcI5DOEKpWGzHjh1WoEAB52x6T3L58mX7/PPPLVOmTJYmTRrLnTu3c5wpM+7PjWl/vThZt26dpU6d2lKnTm1Hjx51WRYUFGQjR460Dh06WJcuXZwnYgZnRnx34cIF8/f3tzlz5piZ63nr3r17T5zFiA8GnhG+b7766isrWLCgc1IN3ms8Z/To0eZwOKxgwYLOtvDj4/Lly9agQQPLlSuXZcuWzYoUKWL79u3zVKnxQsRxiMwez9BWvXp1e/XVV51fLgYHB9u3335r/v7+1qJFiyc+D+c49+jbt68999xzljp1anv++eetWrVq9ttvv5mZ2fbt283Pz8/atGljf/zxh4crjfsivo989NFH5uXlZRMmTLCJEyfapEmTnMsWLVpk/v7+1qNHD7tx44bdv3/f9u3bZ0uXLrUzZ854oPL45datWzZ79uxIA8mfPXvWihYtaiNGjIgUNm3evNn69evnMrkT1w2IiFAqFlu4cKHlyJHDzp8//8TlEQ/m+/fv2549e5xvpGZc0LjTyZMnnRebW7duteTJk1vr1q3tzp07ZvZ/++KvJ2C+IUB8E/EYCP/3w4cPrWbNmtaxY0e7ceOGmf3fMbNx40YbOHCgc/YWxA7nzp2zDBky2OjRoz1dSrwUfuyEhITYqlWrbNiwYZYjRw6rVq3aE3vZnDlzxo4fP+7swYuY8cYbb1idOnXM7PE+unfvno0fP94yZMhgJUqUcFk3ODjYli5daqlSpXJug5gX8T1o3bp1VrBgQdu6datdunTJFi1aZHXr1rX8+fPbkSNHzOxxDzaHw2HDhg3zUMXxQ/h56+LFi/bzzz/blStXbO7cueZwOCxz5sw2efJkl/UXLlxoyZIls969e7v0ZEPMOn36tJn93xfq06dPd5mBcsiQIebj42OLFy92fi4KCgqygIAAa9GiBUEUnopQKhZ75513LH369M7HTzqQ169fb4sXL47UzkEfsyIGfkuWLDE/Pz9btWqV8wS8ceNGS5IkibVv397u3r3rXPdJH8iB+CLicXPv3j3nLHtmZqNGjbLcuXPblClTnLNR3rlzxwICAqx+/focL7HQRx99ZKlTp2b6czcLPxY2btxoo0aNcn543rFjh2XLls2qVq3qsv6PP/5o9+/fd3ud8dHZs2edvQfCb1W5fPmyffLJJ+bj42N9+/Z1WT84ONjmz59vtWrV4otEN5s/f7717NnTunXr5tK+fft2q1atmnXp0sX5Zchvv/3Gl4gxKPz//cOHD9uLL75oNWvWtCZNmpjZ4xkqHQ6Hy5dW4b7++mtzOBw2cODASL12EP1Gjx5tXl5edujQITN7fI02YsQIy5cvn8tx1L17d/Px8bHmzZtbq1atrEKFClawYEHnPuJ6Dk/CqMqxzLRp0/Sf//xHkpQvXz7dvn1b69atcw58bhFm0zMzbdiwQXv37o30PAxwHnMiDow5b9483b17Vw8ePNBbb72lH374QQ8fPlS1atW0fPlyLVmyRD179tTdu3clue4X9hHik4jHzejRo1W/fn0VL15cbdq00e7duzVw4EA1bNhQ06ZNU7Vq1dSqVStVqVJFp06d0tKlSyOd/+B59erVU/369ZUvXz5PlxKvOBwOffvtt2rQoIFCQkKcs1GVK1dOCxcu1B9//KFq1arp+PHjGjx4sDp06KCbN296uOr4IXPmzEqYMKFmz56tjBkz6ty5c3ruuefUokULTZgwQTNnzlT//v2d6ydKlEgvvfSS1q9fLy8vL+dMVohZZqZPPvlEU6ZM0cGDB11e9/Lly6t8+fL68ccfne358+dXggQJFBIS4qmS4yz7/5MBHD58WC+++KIqV66sL774QosWLZL0eIbKjz/+WF9++aU+/fRT3b5927lt8+bN9c0336ht27bOyVEQc2rUqKGAgADVrVtXhw4dUpIkSfT666+rS5cu2rJli/Pz65QpUzR16lRlzJhRYWFhqlq1qn755RfnTJV8/sETeTIRQ2TffvutNWjQwE6dOuUcMK5WrVp27NixSOv++eefVqVKlScOKIeYN3jwYEuVKpXNnDnTxo4daxUqVLB06dLZmjVrnIP4bdy40RwOh40cOdLD1QKxQ/jkDePGjbNp06ZZ4cKFrVy5crZ06VIze9zz8O2337bXXnvNZVwCvqWOnSLeRgb3+PXXXy1z5sz22WefPXH53r17LXfu3JY9e3bLnDmz/fzzz26uMP75ay+nY8eOWdmyZS1Xrlx27tw5MzO7du2affzxx5Y6dWp7++23PVEmzPWc1bp1a3vuuefsiy++cA63YGa2YsUKK1CggJ09+//au/O4nNL3D+Cfp1UkNWIINRhKtjCTdQwxYxnKMhHDMNnSQhhFlrFkz5Js2UJEllEY+z5ZBhGikbEObbJUilJdvz/6daYs853vd3ie6PN+veYl5znnzPU8Ofdzn+vc9339qakwi5WHDx9KixYtZNiwYYW2F/ze9/f3F5VKJdOnTy+0LhGp18WLF8Xe3l7MzMzk0qVLIpK3jp6fn5/UqVNHXFxclH1f7hewn0B/h0mpIubmzZvSsmVLZUG/8PBwKV26tHTo0EGOHDkiInnTXqKioqR27dpvXCCT3q27d+/KJ598Ihs2bCi0vUOHDlKhQgXZvXu3MpXv3LlzvKGmYiv/BiA3N1du3boltWrVkp9//ll5PTk5Wbp06SKNGzd+47oQ7MgQ/SUsLExq164tcXFxyraXkyKZmZly+PDhQvvQuxcREaG0eTdv3pQWLVqIhYVFocTUki
VLRKVSyeLFizUZarGW/52SnZ0tnTp1krp168rcuXPlzz//lFu3bomdnZ20adOG04zU5MqVK1K9enU5duzYK21ZTk6O8ntYuHChaGtry7hx45iYUrOC18KFCxfemJiqV6+euLu7aypMeo8xKVUEbd68WYyNjSUqKkpERLZt2yYmJiZiZGQktWrVkjp16kidOnXE0dFROYZrEajX7du3pUKFCnLgwAER+au86bNnz6RmzZpSp04d2bt3b6FGnIkpKm4Ktks5OTkSHx8vFhYWsmPHDhH567p58uSJlCtX7pXywkT0l/zradmyZVK1alVlnaiCSdsTJ07IxYsXNRJfcVSwjbt8+bKoVCqZP3++sv3GjRuvJKaSkpJk27ZtTLarwd9VzMv//F+8eCEODg6iq6srVapUke7du0unTp2UB4vsX797GzZsEB0dHaXP/LrPPD09XRISEmTlypVibGzMog1q8qZ//xcvXpROnTq9kpiaN2+elC9fXubOnavOMOkDwDWlNOx16wd8++236N27N7Zs2YLMzEx069YNFy9exIQJE9CyZUv06dMHvr6+2Lx5s3KO/LVa6O2T16xjY2FhgXLlyiEwMBBA3roQ+fOkLS0t8fjxYwwcOBD37t1TzqGjo6PWuIk0Lb9dGjJkCDw9PZXtERERAPKumxcvXqBMmTKwtbXFw4cPNREm0Xsh/3pq2rQp7t69i4CAAACAtrY2gLzvmS1btuDo0aNc+0YN5P/XwgGA2bNnIzw8HHp6evDy8sKcOXMAANWqVcPatWthbm6Oli1b4u7duyhXrhy6desGbW1t/p7eoaCgIFSvXh1Llix57eva2trIycmBjo4Otm3bhu7duyM3NxcdO3bE5s2boa+vj6ysLPav1eCTTz6Bjo4Ofv75ZwB47We+YsUK9O3bFwMGDMCNGzdQtmxZdYdZ7BS8vwwLC8O6desQEhKC9PR01KtXD35+fmjQoAHat2+Py5cvw9TUFL1794a/vz+GDx+u4ejpvaPZnFjxVvAp2dmzZwtVolqzZo3Y2tpKUlLS356DT3DerYKfb2xsrPzxxx8SExMjInlTKKpXr15omGpOTo706dNHoqOjpX79+uLk5KT2mIk0reAIwdjYWLGyspJDhw6JSN4TUV1dXVm4cKGyT3Z2tjRo0IBrrxH9v9zcXOU6ioyMlKCgIFmyZIkyCmr69Omip6cn06dPlwcPHsjNmzfFx8dHTExMlGp8pB6TJk0SU1NT2bFjh2zatEnGjBkjWlpahdqzmzdvipWVlXTr1k2DkRYvgwYNEpVKJTo6On87aqPgVL727duLjY2NbN26tVDlZHq37t27J+XLlxd7e3u5ffu2sr1gX2LUqFEyevToQtP56N15+bMvXbq01K9fX/T09OSLL75QlmGIiYmRzp07i7m5uURGRhY6B0eD0n+DSSkNKXih9urVS1q3bi0nTpxQprOIiHz11Vdib2+v/J2NsHoV/LwnTJggn332mZibm0ujRo1kxowZIiKyaNEisbCwkCZNmoi7u7s0btxYatWqJbm5uTJo0CDp1KmTpsIn0jg/Pz/p16+fuLm5KddTSkqKzJgxQ1QqlXTr1k0GDx4srVu3Fmtra05xJXrJ1q1bxczMTFq0aCHt27cXlUoloaGhkpiYKIsWLRIDAwMxNzeXmjVrSvXq1eX8+fOaDrlYSUtLk2bNmsns2bMLbc9flHnOnDnKw6179+7xJk2N9u3bJ506dRIvLy/R1taWmTNnvnHfl6fyWVhYKMU3SD22bdsm+vr60rdvX7ly5YqyPT09XcaOHSsWFhZy7do1DUZYPN2+fVvq1asnZ8+elfT0dImLi5Ovv/5aWrZsKfv37xeRvDWmWrRoIQ4ODiLC+1X633BMqppJXiJQGW7fo0cPREdHY9asWahXrx709PSUKX3z5s1DVlYW9u7dCwAsoalm+Z+3r68vFi9ejDlz5uD48eNo0KABfHx8cPPmTfTr1w9btmxBlSpVkJCQgLp16yIqKgoqlQpPnjxBhQoVkJOTw1L2VOykpqbi5s2bCA0NxY0bN5TrycjICKNGjcKBAweQk5ODJ0+ewNraGhcvXoSOjg5ycnI0HDmR5jx8+FDpA1y4cAFDhw7FTz/9hF9//VWZhhQdHY3y5cvDzc0Nly9fRkBAAJYsWaJ8P5H6ZGdn486dO0r7JiLIzc3F0KFD4eDgAC8vL/j7+wMAzMzMlClj9Pa93M9q1qwZ7ty5AwMDA4SEhMDHxwd+fn6vPbbgVL6tW7eiSZMmqFevnjrCpv/XpUsX+Pv7Y+PGjejevTucnZ3h6uqKXr16YdWqVdi+fTtq1qyp6TCLlRkzZsDT0xO1atVC3bp1YWBggIoVK2Lt2rXIzMxUppDb2NggKChImX7J+1X6n2g0JVaMpKeny6lTp0Tkrwzy4cOHpUaNGkrmPzk5WU6cOCHLly+X48ePi4hI3759xc3NTTNBk6SlpUmnTp1k27ZtIpJXJtjY2FiWLVsmIq9fvDwjI0NGjhwppqamylQ/og/d66YSX79+Xby8vESlUsnatWtf2fflYzhSioqzx48fS9myZWXdunUiIrJz507p3LmziORN/6pcuXKhctusrKdeb1ouwcPDQ6ytreXq1asi8lcfb9SoUdK6dWtRqVSyZcsWtcVZXL1uUfOwsDBp2bKlXL9+XQICApTRa2/C7yDN++233+Tbb78VGxsb+eKLL8Tb21tiY2M1HVaxk52dLXPnzpUSJUqIpaWlpKWliYhIVlaWiIgcOXJE9PT0JDo6utBxXFaG/ldMSqnJpEmTRKVSKdXaRER2794tNWvWlOjoaAkNDZX+/fuLhYWFWFpaio2NjVy5ckViYmLEwMBAtm7dqsHoi4+Xh5w+ePBAKlWqJMePH5d9+/aJoaGhLF26VETyKofNmDFDIiIilP2vXbsm48ePFysrK06joGKjYCfk/v37hdaEePDggXh4eEipUqVkw4YNyvbs7OxC1xuHe1Nxl5mZKV26dBFHR0d5+vSprFq1Sho1aiRXrlwRCwsLGTx4sHKt7d27V5ydneXRo0cajrp4KNjGXbhwQU6ePKkst3Dq1Clp27atODg4KDfPz549EwcHB9m5c6cMHTpUGjZsKI8ePWI7946sXbtWVCqV9O/fX6ZOnaokl+7duyeNGzeWsLAwERFZsGCBaGtri5+fnybDpf+A01zV73XJpNTUVFmxYoXo6OjI+PHjC7125MgR+fTTT/+2wiXRf4PT99SkZ8+eGDhwIHr06IEDBw4AABo1aoSnT5/C0dERAwYMQKVKlRAaGort27cjJSUFN2/ehJWVFSZNmoS4uDhWaXnHbt26pXzGM2fORExMDExNTfHVV19h+fLlcHR0xNy5c+Hi4gIAiIuLw4kTJ5QKewBQs2ZNfPvttzhy5AinUVCxkV+dZfz48WjZsiVsbW3RuHFjbNmyBYaGhpg8eTIGDRqEoUOHYuPGjQDypksUHOLN4d5U3Onp6aFNmzY4dOgQEhMT8dVXX8HAwADNmzdHq1atEBgYqFwn+/fvx+PHj1kZTE3yP
+fRo0fD3t4ednZ2aNu2LXbt2oUmTZpg2LBhSE9PR+PGjdGhQwc0atQIN27cQKdOnVC5cmVoa2ujTJkybOfekcuXLwMA7t27h8OHD6N+/fqYN28eRAT9+/fHpEmTkJaWhuHDh2PhwoUYPXo0QkJCNBw1vUnBdk24/MU7V7DK3rVr13DhwgWICAwNDTFw4EDMmzcP06dPx48//ohTp07h6tWrmD17NkxNTWFhYaHh6OmDoeGkWLFy/fp1cXZ2FhMTE9m9e7eIiCQlJcnPP/8sMTExyhO0xMREqVOnjuzcuVNERG7cuMGnoe9Qbm6unDt3TlQqlYSFhYmHh4eULFlSqWAUGBgoKpVKHB0dJSUlRUREHj58KB07dpQvv/xSeaLDJ6BU3BR8shYUFCSmpqaybt062bdvnzg4OEi9evVk3rx5kpWVJXFxcTJq1ChRqVTK4phElKfg90eDBg2kT58+IiLi7e0t5cuXl5kzZ0piYqLcunVLvL295aOPPnpl2gS9fQXbuJ07d0rt2rVl3759cvbsWWnTpo3Y2trKpk2bRETk7t27snTpUnFzc5MpU6YoI6kGDx4s3bt3l4yMDPYT3pHs7GwZMWKElCpVSg4fPizz5s2TgQMHSunSpaVv375iYmKiVIAVyVtUm1P1iArz9vYWMzMzMTIykho1asjkyZMlPj5eREQCAgKkRIkSolKpxNPTU+zt7eX58+ciwil79HYwKaUGL5dHfzkxlX8xJyUlSUxMjNSuXVu6du2qkViLs+HDh0vJkiWlVKlScubMmUKvTZs2TYyNjcXOzk46dOggLVq0EBsbG2VuNYcaU3G2Y8cOCQwMlBUrVhTa7u7uLpaWlnL69GkREfnjjz9k4cKFvBkgElE69Pnyr4vZs2dL/fr1lZuBH374QWxsbERfX19sbW3F0tKS08PVLCwsTLy8vJTKuyJ5lUQdHBzk888/l5CQEKU/kC8xMVFGjBghxsbGTCCqQW5urvTr109MTU3l4MGDIpJXga93795ibW2tPGgsiN9FVJwVTCZt3rxZKlWqJOHh4RIdHS2jRo2SJk2aiIuLizx48EBERFauXCkGBgYyZcoU5biCVeOJ/g2VCMdFvis5OTlKlb2Cfv/9d8yZMwfbt29HaGgovvrqK2RmZmLq1KnYvn076tevrwwrLjikkt6N7Oxs6OjoYM2aNXB2doa+vj7Wr1+PDh06oGTJksp+27dvx5UrVxAXF4c6depg8ODB0NHRUY4nKo5u376NmjVrIjs7G1OmTMH48eMLXRMNGzZE7dq1ERwcXOg4XjdUnN26dQsjR46Evb09nJycYGBgoLx279491KtXD66urvD19QUAxMbG4sqVK7CwsICZmRkqVKigqdCLnadPn8LGxkapuBsUFKS8lpqaiu+//x7Jycno06cPBg0aBG1tbSQnJ2PJkiU4cOAAAgICYGNjo7k38IESkddOh/zhhx+wbds2bNq0CR07dkR6eroyFYl9aqJXbdq0CfHx8cjOzsbo0aOV7f7+/li5ciXGjBmD7777Dk+fPsXatWsxbNgwzJgxA15eXhqMmj40TEq9IwUTUjt37kROTg709PTQsWNHAHlzdmfPnl0oMXX//n1ERESgZ8+eAJiQetde/nyfPHkCLS0tTJgwAcuXL8eKFSvQvXv3QjcLL3tT4pGouMjNzcWhQ4fg7u4OMzMz7Nu3D3p6esr15ebmhuTkZISGhmo6VKIiIyYmBl5eXti7dy+aNWuG5s2bY+zYsdDT04O+vj5mzpyJDRs2YNOmTahdu7amwy1WXpfsiI+Ph5OTEx49eoTZs2ejffv2yj6pqan45ptvYG1tjcDAwELH6OrqwtTUVK3xf+ju3LmDcuXKoWTJkm9MTA0YMAChoaHYunUr2rdvr4Eoid4PqampsLS0RGJiIgYNGlSoDQMAe3t7pKam4ujRowCArKwsrFq1Cm5ubpg7dy5GjBihgajpQ8SMxzuQm5urJCp69uwJLy8vjBkzBh4eHvDw8AAAWFpawsvLC927d0fPnj0RHh6OSpUqMSGlJgU/36ioKFy4cAHa2towMjKCv78/+vXrh0GDBiE8PBzPnz8HALi6uuL69euFzsOEFBV3WlpasLOzw6JFixAdHQ0nJyc8efIEWVlZyM7OxtmzZ2FkZKTpMImKlFq1amHnzp2IjIyElZUVNm/ejDp16mD8+PGIjo7GV199hZSUFNy+fRtA3ncWvXu5ublKkuPmzZu4d+8e7ty5g4oVKyIkJAQGBgaYPXu2UrAGAIyMjLBv3z4sXbpUOQcAVKxYkQmpt+zUqVOws7NDaGgonj17BpVK9dqFsFetWgUnJyf06tUL4eHhGoiUqOgTERgZGeH06dNo3LgxDh8+jN9//73QPi1atICWlpZyL6SnpwdnZ2csX76cCV96qzhS6h0aNGgQTp48ib1798LMzAyDBw9GUFAQvv/+e6xZswZA3oipsWPH4uOPP1Y6NKQ+o0ePxrZt2xAXFwd7e3s4OjrC0dERAODi4oL169dj4MCBOH/+PO7du4fY2FhOOaJi6ezZszA3N8fHH3/82tdFBAcPHkSfPn1gbGyM6tWrw8jICJcuXcLFixehq6ur5oiJ3g+ZmZl49uwZpk2bhlOnTuHMmTPw8fHB4sWLUaVKFRw/fhyGhoaaDvODV3DUzeTJk7Fz506kp6cjMzMTkyZNwvfff4+4uDg4ODigdOnS8PHxQdu2bQudgw8U3z17e3vcvXsXo0aNwrfffgsDA4PXjpgSETg5OeHhw4c4ePCghqIlKjpebp8KXjd37txBu3btYGRkhCVLluDTTz+Frq4u2rVrh4oVK2LLli2aCpuKCSal3oKnT58iJCQEvXr1QunSpQEAkZGRmDJlCqZMmaKUpp0+fTrc3d0xZ84c9O3bF8uWLQMAJCQkcH0INSnYIO/Zsweenp5YunQpUlJSEBgYiBcvXqBv377o378/AGDixImIiYmBrq4u1q5dC11dXU7Zo2JnyZIlcHd3x9WrV2FlZfXG/UQEhw4dgoeHBzIyMrBnzx5YW1sD4BpSRP9EcnIydu3ahTVr1uDs2bPQ19fHtWvXUK5cOU2HVmxMmTIFCxcuREhICCwtLeHp6Yk9e/bg4sWLsLS0RFxcHLp27YqMjAysXr0an3/+uaZDLhYK9r169OiBq1evwtvb+7WJqYcPHyImJgafffYZ9PT0mCikYq/g/c/KlSsRExODhIQEjBw5Eg0aNICWlhbu3LmD9u3bIy4uDjVq1MCnn36KP/74AydPnoSent4bp8sSvRXqXFX9QzVt2jQZOHBgoW0ZGRkSFBQkmZmZsmHDBqlSpYrs3btXRER69uwpKpVK7O3tCx3DUsHvVnJysvLz7t27xdXVVWbPnq1su3LlinTv3l1atWola9asUbanpqYqP7NSCxU3y5YtE319fdm6des/2v/Fixdy4MABKVu2rPTo0UPZzvaN6M1evj4SExPlt99+kxs3bmgoouIpLS1N2rVrJ9u2bRORvKp7JiYmsnTpUhERefbsmYiI3LlzRwYMGMDKu2pW
8PN2dHSU2rVry7p16yQ9PV3ZnpiYKDY2NuLg4KBcVyxZT5TH29tbzMzMpE+fPtKjRw8xNDSUNWvWSEpKioiI3L59W5o2bSomJiZy8uRJ5biXq4sSvW1MSr1le/bskbi4OBH568tz4MCBMmLECCWhMXbsWBk0aJC4u7trLM7i5vjx49KqVSs5cuSIPH36VOrUqSP6+vqv/A7yE1Nt2rRROqH5eFNNxc2qVatEV1dXfvnll0LbY2Ji/va43Nxc2b9/v1SoUEHat2//LkMkInpr4uLixNjYWK5cuSIHDx4UQ0PDQgmpn376Sa5evVroGCam3r2CSaWXE1PW1tYSHBwsz58/lydPnsgXX3whtWrV4k000UtWrVolVapUkfPnz4uISEREhKhUKjE0NJTFixcXSkx9+umn0qxZM4mPj9dkyFSMcDzrv5SVlaX8vHfvXnTq1AkbN25EcnIytLW1kZmZiUuXLuHevXvQ0dHB/fv3cfz4cXTs2BEBAQEA8NpFGuntKl++PABgzpw5iIuLQ1hYGD7//HP8+uuv2LNnj7KftbU1pk6dipycHFy5cqXQOThklYqTK1eu4Mcff0THjh2VqqEA0L17d4wePbpQ2/cylUqFtm3bYuXKlYiNjcX9+/fVETIR0b9SsWJFODg4wNfXFw4ODliwYAFcXFwAAElJSTh16hSio6MB/NV343T+d+PQoUPw9fUFkFdQI38BeW1tbeTk5AAANm/ejNq1a2PWrFlYtWoVOnbsiAcPHijrGGZnZ2ssfqKi5Pnz50hPT8eECRPQoEEDhIeHo2PHjtiwYQNcXV3h7e2NLVu24MmTJ7CwsMChQ4eQkpICOzs7xMfHazp8KgaYlPoXRAR6enoAgHHjxqFdu3bw8vLCokWLsG7dOiQlJUFfXx8jRoxAWFgYGjVqhCZNmsDExARdunRRzsNkx7tnaWmJ5cuXIzs7G25ubnjx4gVWr14NQ0NDLF26FPv371f2rVWrFoKCguDv76/BiIk0y8TEBC4uLrh58yZmzJgBIK+a6LVr17Bo0SKl7XsTlUqFjh074vLly6hUqZI6QiYi+tdq1aqFPXv2wN7eHv369QMApKSkYOjQoXjx4gW6desGgH23dykzMxObN2/G5s2bMWfOHACvJqbyE06bN29G3bp14e7ujoyMDFy6dElJSHEdQ6I8JUqUQJs2bdChQwfcunUL48ePx+TJk9GrVy989913yMrKwqBBg3DkyBEAgLm5OXbu3IlSpUohMzNTw9FTccCFzv9HBReMGzt2LIKDg3H69GlUrlwZ3t7eCA0NxbBhw/DDDz/A2NgYBw8exMGDB1GpUiUMGzbslXOQely/fh3u7u4AgICAAOTm5mLQoEEwMTGBh4cHvvrqq0L783dExYm8tIjl/fv3sWLFCmzevBmZmZkwMjLCnj17WJiBiN5bbypWUrD9c3Nzw6FDh1C+fHlUqVIFt27dQkZGBs6ePcuCJ2oSFxeH2bNn4/Tp0+jatSu8vb0BFO6XFfydzZ8/Hx4eHtDR0WFCiuhvHD16FJ6enggJCYG1tTXOnz+P7du3o1KlShg4cGCha4fXEqkLk1L/0rZt27B37170798fzZs3V7Z7e3tj06ZN8PT0xPfff4+yZcsWOo7JDs15OTElIhg8eDBevHgBf39/VtKhYu358+fQ0tKCSqWCrq4u7t27h5UrV2LVqlXo3r07FixYAODNN3ZEREXVkydPYGxsDADYuHEjmjZtik8++UR5vWC7tmHDBpw/fx5paWmwtLTE8OHDmfBQs4SEBEybNg1nz559bWIqMTERnp6ecHJygoODAwDeRBP9J9u3b0fPnj2xfft2VK1aFV5eXjA2Nsb69esB8BoizWBS6l/YsGEDfvrpJ6Snp+Po0aOwtLREZmYm9PX1AQBjxozBpk2b4OzsDE9PTxgZGWk4YsqXn5hSqVRYuHAhsrKysGTJEixatIjJQiqWDh8+jF27dmHHjh3Q09ODubk55s2bB2trayQmJmLp0qUIDQ1Fz549MWnSJABMTBHR++PXX39Vpq7MmjULW7ZswYkTJ1C5cuVC+/1du8Y2T/0KJqa6dOmCMWPGAADi4+Ph6OiIpKQkXL16lTfRVOy9PNr97/Tr1w/BwcGwsLCAiYkJfvvtN+jq6r7jCInejEmpf+H+/fvw8/PDqlWr4OzsrIwgyMrKUtZbcXFxQcmSJTFv3jwNRkqvc/36dQwfPhyJiYkICwtDlSpVAHAUGxU/a9aswZQpU9CqVStUq1YNiYmJOHDgABISErB69Wp069at0FQ+JycnTJw4UdNhExH9Y/Hx8XBxccGvv/6K3NxcXLhwAVWrVtV0WPQPFExMde/eHc7OznB0dERiYiKioqI4pZKKvYL3LvkDJPK3Fbw2Cu535MgR6OjooFmzZso6bUzukqYwKfUPvfxll//3xMREzJw5E8eOHYOjoyPGjh0LoHBiKt9/k8Em9YiJicGKFSvg5+fHRBQVS4GBgfDw8MC6devQuXNnlCpVCgBw9epVjBkzBsePH8e+ffvQuHFj/Pnnn0oRgDlz5sDZ2VnD0RMR/XMTJ06Er68vjI2NERUVBXNzcz6Iek8kJCRg+vTpOHPmDH7//XeYmZkVqrLHm2kqrgq2YQsWLMDp06fx4MEDfP755xg+fDgqVqz4xv3zMalLmsak1D9Q8EL18/PD3bt3kZubCycnJ7Ro0QIPHjyAr6+vshhj/tDigscxIVX0sWNKxU1ISAj69OmDw4cPo1WrVgAKt1XXr19H37598fTpU5w+fRqGhoa4d+8e9u/fj379+rEDQ0RFWn57lv9nbGwsEhISMH/+fERERODYsWOwtrZmUuM9kZCQAG9vbzx48ADh4eFMSBEVMGbMGKxatQqTJ09GRkYGVqxYARMTExw9ehQlSpTQdHhEf4t34P9A/o2Xvb09Vq5ciTt37iAmJgYtW7aEn58fypUrBx8fHzRt2hRhYWEYN25coeMAlg5+HzAhRcXJs2fPcOLECQCAqakpgLzEbMG2qlq1aujbty/i4uKQmJgIAKhcuTKcnZ2hra2NnJwc9QdORPQPFGzPUlJSkJSUhJo1a6Jly5YIDAzE559/ji+//BKxsbFKUmPBggX4888/NRk2/Y0KFSpgwYIF2LVrFxNSVOyJCPLHlly8eBG//PILwsPD4erqCisrK8THx2PAgAGFElIci0JFFe/C/6HVq1fj0qVLOHbsGMLDw3Ho0CHMnz8f3t7eWLt2LT7++GN4eXnB0tLylWl7RERFjYGBAaZMmYLevXujSZMmOHnyJLS0tJQOS25uLrS1tdGmTRs8efIEDx48eOUcHClFREVV/oOmiRMnom3btqhduzb69++P8PBwlC9fHsHBwbC1tYWtrS1WrVoFOzs7BAcHw8zMTMOR098xMTGBlpYWcnNzmZCiYmnEiBHYs2cPVCqVknh//Pgx0tPT0axZM4SFhaFXr16YPXs2Bg0ahPT0dGzcuBHPnj3jIAkqstiav8HLT1+Sk5Nhbm6Ojz/+GDk5OdDS0sLw4cPx4MEDjB07Fm3btkW
lSpUQEBCgVNnjlD0iKmpiY2Px6NEj6Ovro0GDBli1ahVycnLw9ddfY//+/WjWrFmhxTF//fVXNG3aFHXq1NF06ERE/1HBqfj+/v4IDAzExIkToauri7Vr12Lu3LlISEjAkCFDEBISglGjRiEgIADm5ubYt28ftLW1OZ3/PcDfDxVHsbGxuHr1Ko4cOYISJUqgdevWAIAyZcrA0tISgYGBGD16NPz8/DBkyBAAQFRUFPbu3Yv69evD2tpak+ETvRFb9NfIyMhQElLR0dEAgNKlS+PixYt4+PChUqEAAL7++mtoaWkhNTUVAJiQIqIia+3atejSpQvatWuHTp06YejQodDX18eqVavg4OCAr7/+WhkxBeRN8duxYwdsbGyUBdCJiIqy/Pbr3LlzSEtLg7+/P9zc3DB48GCEhISgRo0aCA4ORmRkJMqUKYOVK1di9+7dhdYoYsKDiIqimjVrYvLkybCysoKnpycOHz4MALC0tER8fDyGDh2KCRMmKAmp58+fY9q0aXj69CmsrKw0GTrR3+K37kvWrFmD4cOHAwBat26NcePGIScnB+3bt0e9evXg5uaG+Ph46OrqAgBKliyJUqVKKUmqfExIEVFREhgYiCFDhmD48OHYtm0bunbtivDwcMycORMlS5bE/Pnz0bVrV3z99dc4c+YMAKBHjx64f/8+/P39lcWCiYiKuqioKNja2mLixIl4/PgxgLyHhRYWFpg6dSpu376NQ4cOKfubmZlBpVJxShgRFVkvXrwAADRp0gROTk6oVq0ahg0bhuPHj6NkyZL45ZdfULlyZezcuRPz58/HihUr8M033+DPP/9EaGioMu2VqChi9b0CsrOzMW3aNOzduxfJycnQ1tbGxYsXoa+vDyBvXang4GBkZ2dj1KhRyMrKwtSpU1G7dm1s2rRJw9ETEb1eWFgYunXrhvDwcHTu3BkAkJqaii+//BJVq1bFzz//DCBvmvLIkSMRHh6OTz75BJmZmbh8+TJ0dXVZLpiIiqzXjU4PCQnBgAED4OTkhICAAJQqVUrZx9HREaVLl8bq1as1ES4R0f9s8uTJOHfuHJKSkhAZGQkrKyssWLAAbdu2xe3bt+Hq6or4+HiUKVMG1atXx7Jly1gYgIo8/sv8fyICHR0d/PTTT/jll19w48YNDBgwoNDF6+zsjI8++ggbNmxA3759YWVlhUaNGmHNmjXKOThCioiKkszMTOzbtw/VqlXDnTt3lO1GRkaoW7cu0tLSkJmZCX19fZiammLevHnIycnB77//jnPnzrEjQ0RFWsH1n/LbMgDo3bs3nj9/jkGDBqFy5crw9PRE2bJl8ezZM8TGxqJ9+/aaDJuI6L+2fPly+Pn5YdeuXbCyskJERASCgoIwcuRILFiwAHZ2dtixYwfS09Ohp6cHAwMDAK+ulUxU1HCkFAp3aHJzc+Hn54eUlBQcO3YM9evXx4wZM5S1ovLdv38fBgYG+Oijj145BxFRURIfH49Zs2bh1KlT6NKlC8aOHYs9e/bgm2++wcGDB2FnZ1coqZ6cnIyyZctCpVKxI0NE74V58+bh/PnzMDAwwI8//ohq1apBV1cXK1euxJAhQ9CkSRNYWVkhOTkZt27dQmRkpLIUAxHR+2DIkCFIS0tDSEiIsu3o0aPw8fFBamoqli1bhhYtWhQ6hoMm6H3ALAr+WhRz1KhR2LlzJ7y8vODr64t27drh/Pnz8PHxQVpaGoC8KS+RkZH4+OOPlYSUiDAhRURFVsWKFTFmzBjY2tril19+wXfffYeePXsiKCgIdnZ2yM3NLdRhMTU15foqRPTemDt3LqZOnYqPPvoIe/fuhaOjI8LDw5GZmYmBAwdi9erVOHPmDK5evQpXV1dcuHABurq6yhotRETvg7Jly+L27dtKgS0AaNWqFbp27YqrV6+ie/fuOHv2bKFjmJCi90Gxy6S8aWBYfHw87t69Cw8PD2zbtg0qlQre3t745ptvcPHiRbi7uyMyMhL169fHypUrC92o8WInoqKuQoUK8PHxQcOGDXHo0CHY2dmhX79+AN7cLjLZTkRF0cuL9SYmJmL9+vVYuHAh/vzzT1SpUgXTpk1TElP9+vXDypUrcebMGZw9exba2trIycnhSCkiKpLetCB5vXr1EB8fj507dyI9PV3ZXrVqVXzzzTfw8vJCw4YN1RUm0VtTrB6BFxy+uGvXLtja2qJ8+fIA8kYSTJ48GQsXLoSnpycAoHv37vDy8oKBgQG2bNkCe3t7tGjRAkuXLtXUWyAi+p9VrFgR48aNAwCcPXsWs2bNgre3N7S1tTm8m4jeCwVHpx8+fBjPnj3Do0ePlP4ckFfcoWvXrpgxYwZUKhU6d+6Mfv36ITs7G25ubkhPT8e0adM09RaIiN6o4JIwBw4cQEZGBjIzM9GjRw84OTnh6NGj+PHHH5GWloaWLVuiXLlyWLduHWrXro2RI0dCpVKxOA29d4rNmlIFb7hcXV0RERGBkydPIjExEdWrV1f2u3r1Kvz9/bF7924sXLgQXbt2RXZ2NpKSkpCUlAQbGxsAXEOKiN5fCQkJmD59OiIjI9G6dWv4+vpqOiQiov+oYF9u5MiRWLt2LbS0tPDw4UOMGjUKvr6+ykLn2dnZ6N69O86dO4fg4GDY2dkBABYvXoyffvoJv//+O0xNTTX2XoiI/s6PP/6IkJAQGBkZIS4uDrVq1UJAQABsbW0xfPhwHD16FLdu3YKZmRm0tLRw6dIl6Ojo8CEjvZeKRVKq4MXp6emJ4OBgHDp0CJcvX4a3tze2bNmC5s2bK/tHR0djwoQJOHv2LFauXPlKhRYmpIjofZeQkAAvLy+UKFECgYGB7MAQUZFWsC8XGRkJLy8vTJkyBRUrVlSSTH379oWLiwv09PQA5CWmxo4di5kzZxYaNZCSkoIyZcpo5H0QEf0nQUFB8Pb2xr59+2BmZgYRgYODAzIzM7Fp0yZYWVkhKioKcXFxyMrKQufOnZVpyRwhRe+jDz4pVbATM23aNEyYMAG3b9+Gubk5du/ejaCgINy8eRMLFy4slJhatmwZXF1doa2tjVOnTuGzzz7T1FsgInonHj16BGNjY2hpafHJGhG9F0JDQ7F27VqUL18ea9asAQBkZGTA3d0dV69eRe/evQslpvKxkigRFXX5fTEfHx9cuXIF4eHhStv1/PlzNGrUCFWrVsWuXbteOZYJKXqffdDDfV4e5j1hwgSUKlUKixcvBgB07NgRrq6uqFq1Ktzd3REREaEcW6lSJQwcOBChoaFMSBHRB+mjjz6ClpbWK9X3iIiKoqysLERERODy5cu4fPmysr1kyZJYtGgRrK2tERoaijlz5rxSWY8JKSIqii5evIjw8HCcOHFC6YslJCTg4cOHAPLarmfPnqFEiRLw8/PDuXPncOvWrVcWQ2dCit5nH3RSKv/CHj58OIKDg/Hrr79i6dKlWL16NTw8PAAArVu3hpubG2rUqIEBAwYgODgYp0+fxuTJk1GtWjV069YNwJurIBARve84HZmIiqKX+156enrw9fXFgAED8PDhQ3h5eSnJp/zEVPny5X
Hnzh0moYioyNuwYQP69++P1atX45dfflG2//DDD7h48SIWLFgAADAwMAAAvHjxAqampihVqhT7bvRB+eCn7/3xxx+oWbMmzp8/DxsbGzx58gSbN2/GuHHj4OTkhICAAADAyZMnERwcjMDAQNSoUQO1atVCWFgYAHBaCxEREZEaFVy/8/r169DX14eIwMLCAk+fPsWMGTNw8OBB2NnZYerUqUoSKjMzE7q6upyWTERF2rp16+Di4oLVq1ejffv2MDY2Vl5LSUnBvHnzEBwcjMGDB2Po0KF48uQJ3N3d8eLFC+zZs4dtG31QPvikFAA8ffoUhoaGSgcnJSUFoaGhrySmcnJyEBsbi6ysLNSvXx8AFzUnIiIiUqeCyaTx48dj27ZtSE9PR25uLkaPHo3hw4cjPT0d06ZNw+HDh9GmTRtMnjy50Ogo9t+IqKi6cuUKevbsCU9PTwwcOFDZXrDtu3v3LjZt2gRfX1+ULFkSpUuXhomJCU6cOAFdXV22cfRBKRZjmw0NDQH8NUWlTJkycHJyAgCMGzcOWlpa8Pf3h7a2NmrVqqUcJyK82ImIiIjUKP+mbNasWVi2bBnWr1+P3NxcREdHY8SIEYiLi8OsWbPg7e0NlUqFTZs2oUqVKnBxcVHOwf4bERVV9+/fR0ZGBlq2bFkoEZX/p4jA3NwcXl5e+O6773Du3DmUKVMGX3zxBbS1tVm4gT44xfZfs5GREZycnKBSqTBu3DikpKQoVVzycVgkERERkXrkP/kXEbx48QKHDx/GyJEj0b59ewB5BWo++eQTODk5oWHDhujZsye8vLxgbm5eaLQBEVFRFhkZibS0NNSsWRPAq0vFqFQqxMTEIDExEa1atUKlSpWU13JycpiQog9OsX6MZGRkhJ49e2Ls2LEoVaqUpsMhIiIiKpYKjk6PiYmBnp4erl+/rmzLyclBdnY2evTogX79+mH9+vXIyMhAmTJlMGTIEGhrayMnJ0eTb4GI6B/59NNPkZ6ejv379wN4/UCIdevWISQkBC+vtMMqe/QhKtZJKSAvMeXi4oLFixcDwCsXPhERERG9OwVHCfz444/o1KkTcnNzYW9vj3Xr1iE2Nhba2trKPmXKlIFKpULJkiULnYc3a0T0PmjUqBH09PSwfPly3L17V9mefx+ampqK69evo27dupy5Q8VCsU9KAX+V2WSVFiIiIiL1Kdj3ioqKwo0bNxAcHAwtLS107doVlStXhre3N27cuAFtbW1kZmbiypUrqFixooYjJyL631SrVg3Lli3Drl27MHbsWFy4cAFA3oipuLg4ODk5ISEhAUOHDtVwpETqUSyq7xERERFR0bVhwwYsX74cOjo62LFjh7KswqZNm7B69WqcOXMGDRs2xKNHj5CdnY0LFy5AV1eXDxSJ6L2Uk5ODoKAguLq64uOPP0adOnWQm5uLlJQU5ObmKlX2cnJyOAqUPnhMShERERGRWsXGxuLx48fQ1tbGZ599hiVLlmDBggVITU1FdHQ0TE1NlX2vX7+OY8eO4caNGyhfvjw8PDygo6PDClRE9N6LiorC6tWrce3aNVSpUgUNGjSAi4sLq+xRscKkFBERERGpzdq1azFr1izcv38fhoaG6NGjB+bPn4+NGzdi4sSJaNiwIRYsWPC3U/Q4eoCIPmRs46g4YeqViIiIiNQiMDAQw4cPh7+/P6pXr46wsDBs3LgR5ubmGDFiBJKTkxEaGgofHx/MmDEDFSpUeO3NGW/WiOhD8bppyGzjqDjhSCkiIiIieufCwsLQrVs3hIeHo3PnzgDyqkx9+eWXsLCwQFhYGAAgICAAmzdvRs2aNTFlyhRUqlRJg1ETERHRu8Tqe0RERET0TmVmZmLfvn2oVq0a7ty5o2w3MjJSyp5nZGQAADw8PNCzZ09ERERgzZo1GoqYiIiI1IHT94iIiIjondLX18fEiROhr6+P4OBgpKWlYezYsdizZw/Wr1+PgwcPomTJkspUPXd3d1SoUAFdu3bVdOhERET0DnH6HhERERGpRUJCAqZNm4YLFy7AwsICO3fuREBAAPr164fc3FxoaWkpf+bjgr9EREQfLialiIiIiEht4uPjMWPGDGzevBlNmjRR1pJi8omIiKj44ZpSRERERKQ2FStWxLhx49CjRw8kJiZi1qxZAPKqTfFZKRERUfHCkVJEREREpHYJCQmYPn06IiMj0bp1a/j6+mo6JCIiIlIzjpQiIiIiIrWrUKECfHx8UL16dSQlJXGUFBERUTHEkVJEREREpDGPHj2CsbExtLS0ICJQqVSaDomIiIjUhEkpIiIiItK4l6vuERER0YePSSkiIiIiIiIiIlI7Po4iIiIiIiIiIiK1Y1KKiIiIiIiIiIjUjkkpIiIiIiIiIiJSOyaliIiIiIiIiIhI7ZiUIiIiIiIiIiIitWNSioiIiIiIiIiI1I5JKSIiIqL3wNGjR6FSqfDkyRNNh0JERET0VjApRURERPQWqFSqv/1v0qRJ/+r8zZo1Q3x8PMqUKfN2AiYiIiLSMJWIiKaDICIiInrfJSQkKD+HhoZi4sSJuHbtmrLN0NAQhoaGmgiNiIiIqEjiSCkiIiKit6BChQrKf2XKlIFKpVL+Xr58ecybNw+VK1eGvr4+bGxssHfvXuXY27dvQ6VSYdOmTWjWrBlKlCiBOnXq4NixY8o+r5u+d+LECbRq1QolS5aEiYkJ2rVrh8ePHwMAtm7dirp168LAwABly5ZF27ZtkZ6errbPg4iIiOg/YVKKiIiI6B3z9/fH3Llz4efnh0uXLqFdu3awt7fH9evXC+03evRojBo1ChcuXEDTpk3RuXNnPHz48LXnjIqKQps2bWBtbY1Tp04hIiICnTt3Rk5ODuLj49GrVy84OzsjJiYGR48eRbdu3cAB8kRERFSUcPoeERER0Vu2Zs0aeHp6KqOaKlWqBDc3N/j4+Cj72Nra4vPPP8fixYtx+/ZtVK1aFTNnzoS3tzcAIDs7G1WrVoWHhwe8vLxw9OhRtG7dGo8fP4axsTF69+6Nu3fvIiIi4pX///nz59GoUSPcvn0bFhYWannPRERERP8tjpQiIiIieodSU1MRFxeH5s2bF9revHlzxMTEFNrWtGlT5WcdHR189tlnr+yTL3+k1OvUr18fbdq0Qd26deHo6IgVK1Yo0/qIiIiIigompYiIiIjeQwYGBm98TVtbGwcOHMCePXtgbW2NgIAAWFpa4tatW2qMkIiIiOjvMSlFRERE9A4ZGRnBzMwMJ06cKLT9xIkTsLa2LrTt9OnTys/Z2dmIjIxErVq1XnveevXq4dChQ2/8/6pUKjRv3hyTJ0/GhQsXoKenh+3bt/+Ld0JERET0duloOgAiIiKiD93o0aPx008/oXr16rCxsUFQUBCioqKwYcOGQvstXrwYNWrUQK1atTB//nw8fvwYzs7Orz3n2LFjUbduXbi6usLFxQV6eno4cuQIHB0dcePGDRw6dAhff/01ypcvj99++w0PHjx4Y4KLiIiISBOYlCIiIiJ6x4YNG4aUlBSMGjUKSUlJsLa2xo4dO1CjRo1C+82cO
RMzZ85EVFQUPv30U+zYsQOmpqavPWfNmjWxf/9++Pj4wNbWFgYGBmjcuDF69eoFIyMjHD9+HAsWLEBqaiosLCwwd+5cdOjQQR1vl4iIiOgfYfU9IiIiIg3Lr7534cIF2NjYaDocIiIiIrXgmlJERERERERERKR2TEoREREREREREZHacfoeERERERERERGpHUdKERERERERERGR2jEpRUREREREREREasekFBERERERERERqR2TUkREREREREREpHZMShERERERERERkdoxKUVERERERERERGrHpBQREREREREREakdk1JERERERERERKR2TEoREREREREREZHa/R9REDnQU4pyKQAAAABJRU5ErkJggg==",
      "text/plain": [
       "Result(<Figure size 1200x600 with 1 Axes>)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Grab the first rich output returned by the E2B sandbox (the plotted chart)\n",
    "result = code_interpreter_results[0]\n",
    "print(result)\n",
    "\n",
    "# Returning the result as the last expression renders the chart inline\n",
    "result"
   ]
  },
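  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optionally, the chart can be written to disk. The cell below is a minimal sketch: it assumes the returned `Result` exposes a base64-encoded `png` attribute (as the E2B Code Interpreter SDK typically provides) and uses the illustrative filename `topics_chart.png`.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "\n",
    "# Minimal sketch: persist the rendered chart locally.\n",
    "# Assumes the result carries base64-encoded PNG data in a `png` attribute;\n",
    "# check your SDK version if the attribute differs.\n",
    "png_b64 = getattr(result, \"png\", None)\n",
    "if png_b64:\n",
    "    with open(\"topics_chart.png\", \"wb\") as f:\n",
    "        f.write(base64.b64decode(png_b64))\n",
    "    print(\"Chart saved to topics_chart.png\")\n",
    "else:\n",
    "    print(\"No PNG data found on the result\")"
   ]
  }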
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}