Revert "chore: use node specify llm to auto generate prompt" (#6555)

Joel 2024-07-23 13:31:32 +08:00 committed by GitHub
parent e80412df23
commit d726473c6d
9 changed files with 57 additions and 98 deletions
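In short, this revert takes the node-specific LLM selection back out of the prompt auto-generation flow: the `model` / `modelConfig` props disappear from `GetAutomaticResModal`, `PromptGeneratorBtn` and the workflow prompt editors, `generateRule` is called with the instruction alone, the `error` field is dropped from `AutomaticRes`, and the standalone `CompletionParams` / `Model` types are folded back into `ModelConfig`. A minimal sketch of the restored modal props, reassembled from the hunks below (the `isInLLMNode` flag is used by the component but its declaration sits outside the shown hunks, so its typing here is an assumption):

export type IGetAutomaticResProps = {
  mode: AppType
  isShow: boolean
  onClose: () => void
  onFinished: (res: AutomaticRes) => void
  isInLLMNode?: boolean // assumed optional; declared outside the hunks shown below
}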

View File

@@ -14,7 +14,6 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
import cn from '@/utils/classnames'
import { type PromptVariable } from '@/models/debug'
import Tooltip from '@/app/components/base/tooltip'
import type { CompletionParams } from '@/types/app'
import { AppType } from '@/types/app'
import { getNewVar, getVars } from '@/utils/var'
import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -59,7 +58,6 @@ const Prompt: FC<ISimplePromptInput> = ({
const { eventEmitter } = useEventEmitterContextContext()
const {
modelConfig,
completionParams,
dataSets,
setModelConfig,
setPrevPromptConfig,
@@ -249,14 +247,6 @@ const Prompt: FC<ISimplePromptInput> = ({
{showAutomatic && (
<GetAutomaticResModal
mode={mode as AppType}
model={
{
provider: modelConfig.provider,
name: modelConfig.model_id,
mode: modelConfig.mode,
completion_params: completionParams as CompletionParams,
}
}
isShow={showAutomatic}
onClose={showAutomaticFalse}
onFinished={handleAutomaticRes}
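With the `model={...}` payload removed, the modal in the hunk above is rendered with display and callback props only; a sketch of the resulting JSX (the closing tag falls outside the hunk, so it is reconstructed here):

{showAutomatic && (
  <GetAutomaticResModal
    mode={mode as AppType}
    isShow={showAutomatic}
    onClose={showAutomaticFalse}
    onFinished={handleAutomaticRes}
  />
)}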

View File

@@ -20,7 +20,6 @@ import Button from '@/app/components/base/button'
import Toast from '@/app/components/base/toast'
import { generateRule } from '@/service/debug'
import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
import type { Model } from '@/types/app'
import { AppType } from '@/types/app'
import ConfigVar from '@/app/components/app/configuration/config-var'
import OpeningStatement from '@/app/components/app/configuration/features/chat-group/opening-statement'
@@ -34,7 +33,6 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
export type IGetAutomaticResProps = {
mode: AppType
model: Model
isShow: boolean
onClose: () => void
onFinished: (res: AutomaticRes) => void
@@ -59,7 +57,6 @@ const TryLabel: FC<{
const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
mode,
model,
isShow,
onClose,
isInLLMNode,
@@ -152,17 +149,10 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
return
setLoadingTrue()
try {
const { error, ...res } = await generateRule({
const res = await generateRule({
instruction,
model_config: model,
})
setRes(res)
if (error) {
Toast.notify({
type: 'error',
message: error,
})
}
}
finally {
setLoadingFalse()
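Assembled from the hunk above, the post-revert generate handler reduces to a plain try/finally around `generateRule`, with no `model_config` in the request and no `error` field to inspect in the response. The handler name and the guard condition are placeholders, since both sit outside the shown hunk:

const onGenerate = async () => {
  if (!instruction) // placeholder guard; the real check is above the hunk
    return
  setLoadingTrue()
  try {
    const res = await generateRule({ instruction })
    setRes(res)
  }
  finally {
    setLoadingFalse()
  }
}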

View File

@@ -9,7 +9,6 @@ import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import { BlockEnum, EditionType } from '../../../../types'
import type {
ModelConfig,
Node,
NodeOutPutVar,
Variable,
@@ -59,7 +58,6 @@ type Props = {
availableNodes?: Node[]
isSupportPromptGenerator?: boolean
onGenerated?: (prompt: string) => void
modelConfig?: ModelConfig
// for jinja
isSupportJinja?: boolean
editionType?: EditionType
@@ -92,7 +90,6 @@ const Editor: FC<Props> = ({
varList = [],
handleAddVariable,
onGenerated,
modelConfig,
}) => {
const { t } = useTranslation()
const { eventEmitter } = useEventEmitterContextContext()
@@ -133,7 +130,7 @@ const Editor: FC<Props> = ({
<div className='flex items-center'>
<div className='leading-[18px] text-xs font-medium text-gray-500'>{value?.length || 0}</div>
{isSupportPromptGenerator && (
<PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} modelConfig={modelConfig} />
<PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} />
)}
<div className='w-px h-3 ml-2 mr-2 bg-gray-200'></div>

View File

@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
import { uniqueId } from 'lodash-es'
import { useTranslation } from 'react-i18next'
import { RiQuestionLine } from '@remixicon/react'
import type { ModelConfig, PromptItem, Variable } from '../../../types'
import type { PromptItem, Variable } from '../../../types'
import { EditionType } from '../../../types'
import { useWorkflowStore } from '../../../store'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,7 +38,6 @@ type Props = {
availableNodes: any
varList: Variable[]
handleAddVariable: (payload: any) => void
modelConfig?: ModelConfig
}
const roleOptions = [
@@ -78,7 +77,6 @@ const ConfigPromptItem: FC<Props> = ({
availableNodes,
varList,
handleAddVariable,
modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -140,7 +138,6 @@ const ConfigPromptItem: FC<Props> = ({
availableNodes={availableNodes}
isSupportPromptGenerator={payload.role === PromptRole.system}
onGenerated={handleGenerated}
modelConfig={modelConfig}
isSupportJinja
editionType={payload.edition_type}
onEditionTypeChange={onEditionTypeChange}

View File

@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
import produce from 'immer'
import { ReactSortable } from 'react-sortablejs'
import { v4 as uuid4 } from 'uuid'
import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
import { EditionType, PromptRole } from '../../../types'
import useAvailableVarList from '../../_base/hooks/use-available-var-list'
import { useWorkflowStore } from '../../../store'
@@ -33,7 +33,6 @@ type Props = {
}
varList?: Variable[]
handleAddVariable: (payload: any) => void
modelConfig: ModelConfig
}
const ConfigPrompt: FC<Props> = ({
@@ -48,7 +47,6 @@ const ConfigPrompt: FC<Props> = ({
hasSetBlockStatus,
varList = [],
handleAddVariable,
modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -201,7 +199,6 @@ const ConfigPrompt: FC<Props> = ({
availableNodes={availableNodesWithParent}
varList={varList}
handleAddVariable={handleAddVariable}
modelConfig={modelConfig}
/>
</div>
)
@@ -237,7 +234,6 @@ const ConfigPrompt: FC<Props> = ({
onEditionTypeChange={handleCompletionEditionTypeChange}
handleAddVariable={handleAddVariable}
onGenerated={handleGenerated}
modelConfig={modelConfig}
/>
</div>
)}

View File

@@ -7,19 +7,14 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
import { AppType } from '@/types/app'
import type { AutomaticRes } from '@/service/debug'
import type { ModelConfig } from '@/app/components/workflow/types'
import type { Model } from '@/types/app'
type Props = {
className?: string
onGenerated?: (prompt: string) => void
modelConfig?: ModelConfig
}
const PromptGeneratorBtn: FC<Props> = ({
className,
onGenerated,
modelConfig,
}) => {
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -37,7 +32,6 @@ const PromptGeneratorBtn: FC<Props> = ({
isShow={showAutomatic}
onClose={showAutomaticFalse}
onFinished={handleAutomaticRes}
model={modelConfig as Model}
isInLLMNode
/>
)}

View File

@@ -178,7 +178,6 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
hasSetBlockStatus={hasSetBlockStatus}
varList={inputs.prompt_config?.jinja2_variables || []}
handleAddVariable={handleAddVariable}
modelConfig={model}
/>
)}

View File

@@ -7,7 +7,6 @@ export type AutomaticRes = {
prompt: string
variables: string[]
opening_statement: string
error?: string
}
export const sendChatMessage = async (appId: string, body: Record<string, any>, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {

View File

@@ -135,64 +135,9 @@ export enum AgentStrategy {
react = 'react',
}
export type CompletionParams = {
/** Maximum number of tokens in the answer message returned by Completion */
max_tokens: number
/**
* A number between 0 and 2.
* The larger the number, the more random the result;
* the smaller, the more deterministic.
* When in use, choose either `temperature` or `top_p`.
* Default is 1.
*/
temperature: number
/**
* Nucleus sampling: only the tokens that make up the top `top_p` probability mass are considered,
* e.g., 0.1 means sampling only from the tokens comprising the top 10% probability mass.
* When in use, choose either `temperature` or `top_p`.
* Default is 1.
*/
top_p: number
/** When enabled, the returned completion text also includes the prompt content. */
echo: boolean
/**
* Specify up to 4 sequences to automatically stop generating before the text specified in `stop`.
* Suitable for use in chat mode.
* For example, specify "Q" and "A",
* and provide some Q&A examples as context,
* and the model will answer in Q&A format and stop generating before the next "Q".
*/
stop: string[]
/**
* A number between -2.0 and 2.0.
* The larger the value, the less the model will repeat topics and the more it will provide new topics.
*/
presence_penalty: number
/**
* A number between -2.0 and 2.0.
* A lower setting makes the model more likely to keep repeating the same expressions.
* The difference between `frequency_penalty` and `presence_penalty`
* is that `frequency_penalty` penalizes a token in proportion to how often it has already appeared in the generated text,
* while `presence_penalty` applies a flat penalty once a token has appeared at all.
*/
frequency_penalty: number
}
/**
* Model configuration. The backend type.
*/
export type Model = {
/** LLM provider, e.g., OPENAI */
provider: string
/** Model name, e.g., gpt-3.5-turbo */
name: string
mode: ModelModeType
/** Default Completion call parameters */
completion_params: CompletionParams
}
export type ModelConfig = {
opening_statement: string
suggested_questions?: string[]
@@ -229,7 +174,59 @@ export type ModelConfig = {
strategy?: AgentStrategy
tools: ToolItem[]
}
model: Model
model: {
/** LLM provider, e.g., OPENAI */
provider: string
/** Model name, e.g., gpt-3.5-turbo */
name: string
mode: ModelModeType
/** Default Completion call parameters */
completion_params: {
/** Maximum number of tokens in the answer message returned by Completion */
max_tokens: number
/**
* A number between 0 and 2.
* The larger the number, the more random the result;
* the smaller, the more deterministic.
* When in use, choose either `temperature` or `top_p`.
* Default is 1.
*/
temperature: number
/**
* Nucleus sampling: only the tokens that make up the top `top_p` probability mass are considered,
* e.g., 0.1 means sampling only from the tokens comprising the top 10% probability mass.
* When in use, choose either `temperature` or `top_p`.
* Default is 1.
*/
top_p: number
/** When enabled, the returned completion text also includes the prompt content. */
echo: boolean
/**
* Specify up to 4 sequences to automatically stop generating before the text specified in `stop`.
* Suitable for use in chat mode.
* For example, specify "Q" and "A",
* and provide some Q&A examples as context,
* and the model will answer in Q&A format and stop generating before the next "Q".
*/
stop: string[]
/**
* A number between -2.0 and 2.0.
* The larger the value, the less the model will repeat topics and the more it will provide new topics.
*/
presence_penalty: number
/**
* A number between -2.0 and 2.0.
* A lower setting makes the model more likely to keep repeating the same expressions.
* The difference between `frequency_penalty` and `presence_penalty`
* is that `frequency_penalty` penalizes a token in proportion to how often it has already appeared in the generated text,
* while `presence_penalty` applies a flat penalty once a token has appeared at all.
*/
frequency_penalty: number
}
}
dataset_configs: DatasetConfigs
file_upload?: {
image: VisionSettings