Merge pull request #852 from automatisch/openai-send-prompt
feat(openai): add send prompt action
packages/backend/src/apps/openai/actions/index.ts
@@ -1,3 +1,4 @@
 import checkModeration from './check-moderation';
+import sendPrompt from './send-prompt';
 
-export default [checkModeration];
+export default [checkModeration, sendPrompt];
packages/backend/src/apps/openai/actions/send-prompt/index.ts (new file)
import defineAction from '../../../../helpers/define-action';

// Treat an empty or missing optional input as unset so the key is omitted
// from the JSON body; otherwise parse the string as a float.
const castFloatOrUndefined = (value: string | null) => {
  return value === '' || value === null ? undefined : parseFloat(value);
};

export default defineAction({
  name: 'Send prompt',
  key: 'sendPrompt',
  description: 'Creates a completion for the provided prompt and parameters.',
  arguments: [
    {
      label: 'Model',
      key: 'model',
      type: 'dropdown' as const,
      required: true,
      variables: false,
      source: {
        type: 'query',
        name: 'getDynamicData',
        arguments: [
          {
            name: 'key',
            value: 'listModels',
          },
        ],
      },
    },
    {
      label: 'Prompt',
      key: 'prompt',
      type: 'string' as const,
      required: true,
      variables: true,
      description: 'The prompt to generate a completion for.',
    },
    {
      label: 'Temperature',
      key: 'temperature',
      type: 'string' as const,
      required: false,
      variables: true,
      description: 'What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 for ones with a well-defined answer. We generally recommend altering this or Top P, but not both.',
    },
    {
      label: 'Maximum tokens',
      key: 'maxTokens',
      type: 'string' as const,
      required: false,
      variables: true,
      description: 'The maximum number of tokens to generate in the completion.',
    },
    {
      label: 'Stop Sequence',
      key: 'stopSequence',
      type: 'string' as const,
      required: false,
      variables: true,
      description: 'A single stop sequence at which the API will stop generating further tokens. The returned text will not contain the stop sequence.',
    },
    {
      label: 'Top P',
      key: 'topP',
      type: 'string' as const,
      required: false,
      variables: true,
      description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with Top P probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
    },
    {
      label: 'Frequency Penalty',
      key: 'frequencyPenalty',
      type: 'string' as const,
      required: false,
      variables: true,
      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
    },
    {
      label: 'Presence Penalty',
      key: 'presencePenalty',
      type: 'string' as const,
      required: false,
      variables: true,
      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`,
    },
  ],

  async run($) {
    const payload = {
      model: $.step.parameters.model as string,
      prompt: $.step.parameters.prompt as string,
      temperature: castFloatOrUndefined($.step.parameters.temperature as string),
      max_tokens: castFloatOrUndefined($.step.parameters.maxTokens as string),
      stop: ($.step.parameters.stopSequence as string) || null,
      top_p: castFloatOrUndefined($.step.parameters.topP as string),
      frequency_penalty: castFloatOrUndefined($.step.parameters.frequencyPenalty as string),
      presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty as string),
    };

    const { data } = await $.http.post('/v1/completions', payload);

    $.setActionItem({
      raw: data,
    });
  },
});
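For illustration, a standalone sketch (not part of the commit; sample values are hypothetical) of how the casting helper interacts with serialization: blank optional fields become undefined and are dropped from the JSON request body, while filled-in fields are sent as numbers.

// Standalone TypeScript sketch; model id and values are placeholders.
const castFloatOrUndefined = (value: string | null) =>
  value === '' || value === null ? undefined : parseFloat(value);

const payload = {
  model: 'text-davinci-003',                // hypothetical model id
  prompt: 'Say hello.',
  temperature: castFloatOrUndefined('0.9'), // 0.9
  max_tokens: castFloatOrUndefined(''),     // undefined
};

// JSON.stringify drops undefined-valued keys, so max_tokens is omitted:
console.log(JSON.stringify(payload));
// {"model":"text-davinci-003","prompt":"Say hello.","temperature":0.9}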
packages/backend/src/apps/openai/dynamic-data/index.ts (new file)
import listModels from './list-models';

export default [listModels];
packages/backend/src/apps/openai/dynamic-data/list-models/index.ts (new file)
import { IGlobalVariable } from '@automatisch/types';

export default {
  name: 'List models',
  key: 'listModels',

  async run($: IGlobalVariable) {
    const response = await $.http.get('/v1/models');

    const models = response.data.data.map((model: { id: string }) => {
      return {
        value: model.id,
        name: model.id,
      };
    });

    return { data: models };
  },
};
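A sketch of what this dynamic-data entry produces, assuming the usual /v1/models response layout (a data array of model objects); the model ids are placeholders:

// Hypothetical /v1/models response body ($.http.get returns it under .data).
const response = {
  data: {
    data: [{ id: 'text-davinci-003' }, { id: 'gpt-3.5-turbo' }],
  },
};

const models = response.data.data.map((model: { id: string }) => ({
  value: model.id,
  name: model.id,
}));

console.log(models);
// [ { value: 'text-davinci-003', name: 'text-davinci-003' },
//   { value: 'gpt-3.5-turbo', name: 'gpt-3.5-turbo' } ]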
packages/backend/src/apps/openai/index.ts
@@ -2,6 +2,7 @@ import defineApp from '../../helpers/define-app';
 import addAuthHeader from './common/add-auth-header';
 import auth from './auth';
 import actions from './actions';
+import dynamicData from './dynamic-data';
 
 export default defineApp({
   name: 'OpenAI',
@@ -15,4 +16,5 @@ export default defineApp({
   beforeRequest: [addAuthHeader],
   auth,
   actions,
+  dynamicData,
 });
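To show how the pieces connect, a minimal sketch of how the Model dropdown's getDynamicData query could resolve to the listModels entry registered above. The types and lookup logic here are assumptions for illustration, not Automatisch's actual dispatch code:

// Hypothetical sketch; Automatisch's real getDynamicData handler is not in this diff.
type DropdownOption = { value: string; name: string };
type DynamicDataEntry = {
  name: string;
  key: string;
  run: () => Promise<{ data: DropdownOption[] }>;
};

// Stub standing in for the array exported from ./dynamic-data.
const dynamicData: DynamicDataEntry[] = [
  {
    name: 'List models',
    key: 'listModels',
    run: async () => ({ data: [{ value: 'gpt-3.5-turbo', name: 'gpt-3.5-turbo' }] }),
  },
];

// The dropdown's source arguments carry { name: 'key', value: 'listModels' },
// so a handler could look the entry up by that key and run it.
const requestedKey = 'listModels';
const entry = dynamicData.find((d) => d.key === requestedKey);
entry?.run().then((result) => console.log(result.data));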
@@ -1,8 +1,10 @@
 ---
 favicon: /favicons/openai.svg
 items:
-  - name: Check moderation Text
+  - name: Check moderation
     desc: Checks for hate, hate/threatening, self-harm, sexual, sexual/minors, violence, or violence/graphic content in text.
+  - name: Send prompt
+    desc: Creates a completion for the provided prompt and parameters.
 ---
 
 <script setup>