Mirror of https://github.com/langgenius/dify.git (synced 2024-11-16 11:42:29 +08:00)

Revert "chore: use node specify llm to auto generate prompt" (#6555)

parent e80412df23
commit d726473c6d

This revert removes the plumbing that let the automatic prompt generator run on a node-specified model: the `model`/`modelConfig` props are dropped from the prompt editor components, `generateRule` is called without a `model_config` and without an inline `error` field on its result, and the standalone `CompletionParams` and `Model` types are folded back into the inline `model` field of `ModelConfig` in `web/types/app.ts`.
@@ -14,7 +14,6 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
 import cn from '@/utils/classnames'
 import { type PromptVariable } from '@/models/debug'
 import Tooltip from '@/app/components/base/tooltip'
-import type { CompletionParams } from '@/types/app'
 import { AppType } from '@/types/app'
 import { getNewVar, getVars } from '@/utils/var'
 import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -59,7 +58,6 @@ const Prompt: FC<ISimplePromptInput> = ({
   const { eventEmitter } = useEventEmitterContextContext()
   const {
     modelConfig,
-    completionParams,
     dataSets,
     setModelConfig,
     setPrevPromptConfig,
@@ -249,14 +247,6 @@ const Prompt: FC<ISimplePromptInput> = ({
       {showAutomatic && (
         <GetAutomaticResModal
           mode={mode as AppType}
-          model={
-            {
-              provider: modelConfig.provider,
-              name: modelConfig.model_id,
-              mode: modelConfig.mode,
-              completion_params: completionParams as CompletionParams,
-            }
-          }
           isShow={showAutomatic}
           onClose={showAutomaticFalse}
           onFinished={handleAutomaticRes}
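For orientation while reading this revert: the `model` prop stripped out above matched the standalone `Model` type that the commit also deletes from `web/types/app.ts` (last file below). A minimal sketch of the object the pre-revert code assembled, with the types inlined and every concrete value invented for illustration:

// Sketch of the prop shape removed above; mirrors the `Model` and
// `CompletionParams` types this commit deletes from web/types/app.ts.
type CompletionParams = {
  max_tokens: number
  temperature: number
  top_p: number
  echo: boolean
  stop: string[]
  presence_penalty: number
  frequency_penalty: number
}

type Model = {
  provider: string // LLM provider, e.g., OPENAI
  name: string
  mode: string // ModelModeType in the real code
  completion_params: CompletionParams
}

// Pre-revert, <GetAutomaticResModal> received this so the prompt generator
// could run on the editor's configured model (per the reverted commit title);
// post-revert it receives no model at all.
const model: Model = {
  provider: 'openai', // hypothetical values from here down
  name: 'gpt-3.5-turbo',
  mode: 'chat',
  completion_params: {
    max_tokens: 512,
    temperature: 0.7,
    top_p: 1,
    echo: false,
    stop: [],
    presence_penalty: 0,
    frequency_penalty: 0,
  },
}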
@@ -20,7 +20,6 @@ import Button from '@/app/components/base/button'
 import Toast from '@/app/components/base/toast'
 import { generateRule } from '@/service/debug'
 import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
-import type { Model } from '@/types/app'
 import { AppType } from '@/types/app'
 import ConfigVar from '@/app/components/app/configuration/config-var'
 import OpeningStatement from '@/app/components/app/configuration/features/chat-group/opening-statement'
@@ -34,7 +33,6 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
 
 export type IGetAutomaticResProps = {
   mode: AppType
-  model: Model
   isShow: boolean
   onClose: () => void
   onFinished: (res: AutomaticRes) => void
@@ -59,7 +57,6 @@ const TryLabel: FC<{
 
 const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
   mode,
-  model,
   isShow,
   onClose,
   isInLLMNode,
@@ -152,17 +149,10 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
       return
     setLoadingTrue()
     try {
-      const { error, ...res } = await generateRule({
+      const res = await generateRule({
         instruction,
-        model_config: model,
       })
       setRes(res)
-      if (error) {
-        Toast.notify({
-          type: 'error',
-          message: error,
-        })
-      }
     }
     finally {
       setLoadingFalse()
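The last hunk above also reverts an error-reporting convention: pre-revert, `generateRule` resolved with an optional inline `error` field (the `error?: string` member removed from `AutomaticRes` further down) and the component raised the toast itself. A sketch of the two calling patterns, assuming that after the revert failures surface through the shared request layer rather than the resolved value:

// Sketch only; `generateRule` and `Toast` are the real imports in this file,
// declared here so the snippet stands alone. `instruction` and `model` stand
// in for component state.
declare function generateRule(body: Record<string, any>): Promise<any>
declare const Toast: { notify: (opts: { type: string, message: string }) => void }
declare const instruction: string
declare const model: unknown

async function preRevert() {
  // The resolved value could carry an inline `error`; the caller surfaced it.
  const { error, ...res } = await generateRule({
    instruction,
    model_config: model, // the node-specified model this commit reverts away
  })
  if (error)
    Toast.notify({ type: 'error', message: error })
  return res
}

async function postRevert() {
  // No model_config and no inline error field; a failing request is assumed
  // to reject or be reported by the shared fetch wrapper instead.
  return generateRule({ instruction })
}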
@@ -9,7 +9,6 @@ import { useTranslation } from 'react-i18next'
 import { useBoolean } from 'ahooks'
 import { BlockEnum, EditionType } from '../../../../types'
 import type {
-  ModelConfig,
   Node,
   NodeOutPutVar,
   Variable,
@@ -59,7 +58,6 @@ type Props = {
   availableNodes?: Node[]
   isSupportPromptGenerator?: boolean
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
   // for jinja
   isSupportJinja?: boolean
   editionType?: EditionType
@@ -92,7 +90,6 @@ const Editor: FC<Props> = ({
   varList = [],
   handleAddVariable,
   onGenerated,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const { eventEmitter } = useEventEmitterContextContext()
@@ -133,7 +130,7 @@ const Editor: FC<Props> = ({
         <div className='flex items-center'>
           <div className='leading-[18px] text-xs font-medium text-gray-500'>{value?.length || 0}</div>
           {isSupportPromptGenerator && (
-            <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} modelConfig={modelConfig} />
+            <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} />
           )}
 
           <div className='w-px h-3 ml-2 mr-2 bg-gray-200'></div>
@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
 import { uniqueId } from 'lodash-es'
 import { useTranslation } from 'react-i18next'
 import { RiQuestionLine } from '@remixicon/react'
-import type { ModelConfig, PromptItem, Variable } from '../../../types'
+import type { PromptItem, Variable } from '../../../types'
 import { EditionType } from '../../../types'
 import { useWorkflowStore } from '../../../store'
 import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,7 +38,6 @@ type Props = {
   availableNodes: any
   varList: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig?: ModelConfig
 }
 
 const roleOptions = [
@@ -78,7 +77,6 @@ const ConfigPromptItem: FC<Props> = ({
   availableNodes,
   varList,
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -140,7 +138,6 @@ const ConfigPromptItem: FC<Props> = ({
       availableNodes={availableNodes}
       isSupportPromptGenerator={payload.role === PromptRole.system}
       onGenerated={handleGenerated}
-      modelConfig={modelConfig}
       isSupportJinja
       editionType={payload.edition_type}
       onEditionTypeChange={onEditionTypeChange}
@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
 import produce from 'immer'
 import { ReactSortable } from 'react-sortablejs'
 import { v4 as uuid4 } from 'uuid'
-import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
 import { EditionType, PromptRole } from '../../../types'
 import useAvailableVarList from '../../_base/hooks/use-available-var-list'
 import { useWorkflowStore } from '../../../store'
@@ -33,7 +33,6 @@ type Props = {
   }
   varList?: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig: ModelConfig
 }
 
 const ConfigPrompt: FC<Props> = ({
@@ -48,7 +47,6 @@ const ConfigPrompt: FC<Props> = ({
   hasSetBlockStatus,
   varList = [],
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -201,7 +199,6 @@ const ConfigPrompt: FC<Props> = ({
             availableNodes={availableNodesWithParent}
             varList={varList}
             handleAddVariable={handleAddVariable}
-            modelConfig={modelConfig}
           />
         </div>
       )
@@ -237,7 +234,6 @@ const ConfigPrompt: FC<Props> = ({
             onEditionTypeChange={handleCompletionEditionTypeChange}
             handleAddVariable={handleAddVariable}
             onGenerated={handleGenerated}
-            modelConfig={modelConfig}
           />
         </div>
       )}
@@ -7,19 +7,14 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
 import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
 import { AppType } from '@/types/app'
 import type { AutomaticRes } from '@/service/debug'
-import type { ModelConfig } from '@/app/components/workflow/types'
-import type { Model } from '@/types/app'
 
 type Props = {
   className?: string
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
 }
 
 const PromptGeneratorBtn: FC<Props> = ({
   className,
   onGenerated,
-  modelConfig,
 }) => {
   const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -37,7 +32,6 @@ const PromptGeneratorBtn: FC<Props> = ({
       isShow={showAutomatic}
       onClose={showAutomaticFalse}
       onFinished={handleAutomaticRes}
-      model={modelConfig as Model}
       isInLLMNode
     />
   )}
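A detail worth noting in the hunks above: the reverted code passed the workflow editor's `ModelConfig` (from `@/app/components/workflow/types`) where the app-side `Model` type was expected, bridged by a bare `as` cast. That only works if the two shapes happen to line up; a sketch of the assumption, with both types abbreviated and their field lists inferred from this commit's diffs:

// Abbreviated, assumed shapes; not the full real definitions.
type WorkflowModelConfig = { // ModelConfig from @/app/components/workflow/types
  provider: string
  name: string
  mode: string
  completion_params: Record<string, any> // loosely typed on the workflow side
}
type AppModel = { // the Model type this commit deletes from @/types/app
  provider: string
  name: string
  mode: string // ModelModeType in the real code
  completion_params: { max_tokens: number, temperature: number } // etc.
}

declare const modelConfig: WorkflowModelConfig
// The reverted JSX did `model={modelConfig as Model}`: the cast silences the
// checker and simply trusts the shapes to agree at runtime.
const model = modelConfig as AppModel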
@@ -178,7 +178,6 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
           hasSetBlockStatus={hasSetBlockStatus}
           varList={inputs.prompt_config?.jinja2_variables || []}
           handleAddVariable={handleAddVariable}
-          modelConfig={model}
         />
       )}
 
@@ -7,7 +7,6 @@ export type AutomaticRes = {
   prompt: string
   variables: string[]
   opening_statement: string
-  error?: string
 }
 
 export const sendChatMessage = async (appId: string, body: Record<string, any>, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {
web/types/app.ts | 109

@@ -135,64 +135,9 @@ export enum AgentStrategy {
   react = 'react',
 }
 
-export type CompletionParams = {
-  /** Maximum number of tokens in the answer message returned by Completion */
-  max_tokens: number
-  /**
-   * A number between 0 and 2.
-   * The larger the number, the more random the result;
-   * otherwise, the more deterministic.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  temperature: number
-  /**
-   * Represents the proportion of probability mass samples to take,
-   * e.g., 0.1 means taking the top 10% probability mass samples.
-   * The determinism between the samples is basically consistent.
-   * Among these results, the `top_p` probability mass results are taken.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  top_p: number
-  /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
-  echo: boolean
-  /**
-   * Specify up to 4 to automatically stop generating before the text specified in `stop`.
-   * Suitable for use in chat mode.
-   * For example, specify "Q" and "A",
-   * and provide some Q&A examples as context,
-   * and the model will give out in Q&A format and stop generating before Q&A.
-   */
-  stop: string[]
-  /**
-   * A number between -2.0 and 2.0.
-   * The larger the value, the less the model will repeat topics and the more it will provide new topics.
-   */
-  presence_penalty: number
-  /**
-   * A number between -2.0 and 2.0.
-   * A lower setting will make the model appear less cultured,
-   * always repeating expressions.
-   * The difference between `frequency_penalty` and `presence_penalty`
-   * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
-   * while `presence_penalty` penalizes a word based on its occurrence in the input text.
-   */
-  frequency_penalty: number
-}
 /**
  * Model configuration. The backend type.
  */
-export type Model = {
-  /** LLM provider, e.g., OPENAI */
-  provider: string
-  /** Model name, e.g, gpt-3.5.turbo */
-  name: string
-  mode: ModelModeType
-  /** Default Completion call parameters */
-  completion_params: CompletionParams
-}
-
 export type ModelConfig = {
   opening_statement: string
   suggested_questions?: string[]
@@ -229,7 +174,59 @@ export type ModelConfig = {
     strategy?: AgentStrategy
     tools: ToolItem[]
   }
-  model: Model
+  model: {
+    /** LLM provider, e.g., OPENAI */
+    provider: string
+    /** Model name, e.g, gpt-3.5.turbo */
+    name: string
+    mode: ModelModeType
+    /** Default Completion call parameters */
+    completion_params: {
+      /** Maximum number of tokens in the answer message returned by Completion */
+      max_tokens: number
+      /**
+       * A number between 0 and 2.
+       * The larger the number, the more random the result;
+       * otherwise, the more deterministic.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      temperature: number
+      /**
+       * Represents the proportion of probability mass samples to take,
+       * e.g., 0.1 means taking the top 10% probability mass samples.
+       * The determinism between the samples is basically consistent.
+       * Among these results, the `top_p` probability mass results are taken.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      top_p: number
+      /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
+      echo: boolean
+      /**
+       * Specify up to 4 to automatically stop generating before the text specified in `stop`.
+       * Suitable for use in chat mode.
+       * For example, specify "Q" and "A",
+       * and provide some Q&A examples as context,
+       * and the model will give out in Q&A format and stop generating before Q&A.
+       */
+      stop: string[]
+      /**
+       * A number between -2.0 and 2.0.
+       * The larger the value, the less the model will repeat topics and the more it will provide new topics.
+       */
+      presence_penalty: number
+      /**
+       * A number between -2.0 and 2.0.
+       * A lower setting will make the model appear less cultured,
+       * always repeating expressions.
+       * The difference between `frequency_penalty` and `presence_penalty`
+       * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
+       * while `presence_penalty` penalizes a word based on its occurrence in the input text.
+       */
+      frequency_penalty: number
+    }
+  }
   dataset_configs: DatasetConfigs
   file_upload?: {
     image: VisionSettings
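After this final change there is no exported `Model` or `CompletionParams`; those shapes exist only inline under `ModelConfig`. Should calling code still need to name them, TypeScript's indexed access types can recover them without reintroducing the exports; a sketch (the import path is real, the derived aliases are hypothetical):

import type { ModelConfig } from '@/types/app'

// Hypothetical aliases: indexed access re-derives the inline shapes.
type Model = ModelConfig['model']
type CompletionParams = ModelConfig['model']['completion_params']

// Illustrative values only.
const params: CompletionParams = {
  max_tokens: 512,
  temperature: 0.7,
  top_p: 1,
  echo: false,
  stop: [],
  presence_penalty: 0,
  frequency_penalty: 0,
}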