feat: Convert all app files to JS

Author: Faruk AYDIN
Date: 2024-01-05 17:44:21 +01:00
Parent: b95478b635
Commit: 43dba351c3

1030 changed files with 5114 additions and 6436 deletions

View File

@@ -0,0 +1,4 @@
+import sendPrompt from './send-prompt/index.js';
+import sendChatPrompt from './send-chat-prompt/index.js';
+
+export default [sendChatPrompt, sendPrompt];

View File

@@ -1,4 +0,0 @@
-import sendPrompt from './send-prompt';
-import sendChatPrompt from './send-chat-prompt';
-
-export default [sendChatPrompt, sendPrompt];

View File

@@ -1,13 +1,8 @@
-import defineAction from '../../../../helpers/define-action';
+import defineAction from '../../../../helpers/define-action.js';
 
-type TMessage = {
-  role: string;
-  content: string;
-}
-
-const castFloatOrUndefined = (value: string | null) => {
+const castFloatOrUndefined = (value) => {
   return value === '' ? undefined : parseFloat(value);
-}
+};
 
 export default defineAction({
   name: 'Send chat prompt',
@@ -17,7 +12,7 @@ export default defineAction({
     {
       label: 'Model',
       key: 'model',
-      type: 'dropdown' as const,
+      type: 'dropdown',
       required: true,
       variables: true,
       source: {
@@ -34,7 +29,7 @@ export default defineAction({
     {
       label: 'Messages',
       key: 'messages',
-      type: 'dynamic' as const,
+      type: 'dynamic',
       required: true,
       description: 'Add or remove messages as needed',
       value: [{ role: 'system', body: '' }],
@@ -42,7 +37,7 @@ export default defineAction({
         {
           label: 'Role',
          key: 'role',
-          type: 'dropdown' as const,
+          type: 'dropdown',
           required: true,
           options: [
             {
@@ -52,78 +47,84 @@ export default defineAction({
             {
               label: 'User',
               value: 'user',
-            }
+            },
           ],
         },
         {
           label: 'Content',
           key: 'content',
-          type: 'string' as const,
+          type: 'string',
           required: true,
           variables: true,
-        }
+        },
       ],
     },
     {
       label: 'Temperature',
       key: 'temperature',
-      type: 'string' as const,
+      type: 'string',
      required: false,
       variables: true,
-      description: 'What sampling temperature to use. Higher values mean the model will take more risk. Try 0.9 for more creative applications, and 0 for ones with a well-defined answer. We generally recommend altering this or Top P but not both.'
+      description:
+        'What sampling temperature to use. Higher values mean the model will take more risk. Try 0.9 for more creative applications, and 0 for ones with a well-defined answer. We generally recommend altering this or Top P but not both.',
     },
     {
       label: 'Maximum tokens',
       key: 'maxTokens',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'The maximum number of tokens to generate in the completion.'
+      description:
+        'The maximum number of tokens to generate in the completion.',
     },
     {
       label: 'Stop Sequence',
       key: 'stopSequence',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'Single stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence.'
+      description:
+        'Single stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence.',
     },
     {
       label: 'Top P',
       key: 'topP',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with Top P probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.'
+      description:
+        'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with Top P probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
     },
     {
       label: 'Frequency Penalty',
       key: 'frequencyPenalty',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`
+      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
     },
     {
       label: 'Presence Penalty',
       key: 'presencePenalty',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`
+      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`,
     },
   ],
 
   async run($) {
     const payload = {
-      model: $.step.parameters.model as string,
-      temperature: castFloatOrUndefined($.step.parameters.temperature as string),
-      max_tokens: castFloatOrUndefined($.step.parameters.maxTokens as string),
-      stop: ($.step.parameters.stopSequence as string || null),
-      top_p: castFloatOrUndefined($.step.parameters.topP as string),
-      frequency_penalty: castFloatOrUndefined($.step.parameters.frequencyPenalty as string),
-      presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty as string),
-      messages: ($.step.parameters.messages as TMessage[]).map(message => ({
+      model: $.step.parameters.model,
+      temperature: castFloatOrUndefined($.step.parameters.temperature),
+      max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
+      stop: $.step.parameters.stopSequence || null,
+      top_p: castFloatOrUndefined($.step.parameters.topP),
+      frequency_penalty: castFloatOrUndefined(
+        $.step.parameters.frequencyPenalty
+      ),
+      presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
+      messages: $.step.parameters.messages.map((message) => ({
         role: message.role,
         content: message.content,
       })),
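
A note on the converted helper at the top of this file: only the exact empty string becomes undefined; every other string goes through parseFloat. A hypothetical usage sketch, not part of the commit:

// Hypothetical usage sketch of castFloatOrUndefined; not part of the commit.
castFloatOrUndefined('');    // undefined (dropped later if the payload is JSON-serialized)
castFloatOrUndefined('0.9'); // 0.9
castFloatOrUndefined('abc'); // NaN (parseFloat cannot parse it)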

View File

@@ -1,8 +1,8 @@
-import defineAction from '../../../../helpers/define-action';
+import defineAction from '../../../../helpers/define-action.js';
 
-const castFloatOrUndefined = (value: string | null) => {
+const castFloatOrUndefined = (value) => {
   return value === '' ? undefined : parseFloat(value);
-}
+};
 
 export default defineAction({
   name: 'Send prompt',
@@ -12,7 +12,7 @@ export default defineAction({
     {
       label: 'Model',
       key: 'model',
-      type: 'dropdown' as const,
+      type: 'dropdown',
       required: true,
       variables: true,
       source: {
@@ -29,71 +29,77 @@ export default defineAction({
     {
       label: 'Prompt',
       key: 'prompt',
-      type: 'string' as const,
+      type: 'string',
       required: true,
       variables: true,
-      description: 'The text to analyze.'
+      description: 'The text to analyze.',
     },
     {
       label: 'Temperature',
       key: 'temperature',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'What sampling temperature to use. Higher values mean the model will take more risk. Try 0.9 for more creative applications, and 0 for ones with a well-defined answer. We generally recommend altering this or Top P but not both.'
+      description:
+        'What sampling temperature to use. Higher values mean the model will take more risk. Try 0.9 for more creative applications, and 0 for ones with a well-defined answer. We generally recommend altering this or Top P but not both.',
     },
     {
       label: 'Maximum tokens',
       key: 'maxTokens',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'The maximum number of tokens to generate in the completion.'
+      description:
+        'The maximum number of tokens to generate in the completion.',
     },
     {
       label: 'Stop Sequence',
       key: 'stopSequence',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'Single stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence.'
+      description:
+        'Single stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence.',
     },
     {
       label: 'Top P',
       key: 'topP',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with Top P probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.'
+      description:
+        'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with Top P probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
     },
     {
       label: 'Frequency Penalty',
       key: 'frequencyPenalty',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`
+      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
     },
     {
       label: 'Presence Penalty',
       key: 'presencePenalty',
-      type: 'string' as const,
+      type: 'string',
       required: false,
       variables: true,
-      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`
+      description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`,
     },
   ],
 
   async run($) {
     const payload = {
-      model: $.step.parameters.model as string,
-      prompt: $.step.parameters.prompt as string,
-      temperature: castFloatOrUndefined($.step.parameters.temperature as string),
-      max_tokens: castFloatOrUndefined($.step.parameters.maxTokens as string),
-      stop: ($.step.parameters.stopSequence as string || null),
-      top_p: castFloatOrUndefined($.step.parameters.topP as string),
-      frequency_penalty: castFloatOrUndefined($.step.parameters.frequencyPenalty as string),
-      presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty as string),
+      model: $.step.parameters.model,
+      prompt: $.step.parameters.prompt,
+      temperature: castFloatOrUndefined($.step.parameters.temperature),
+      max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
+      stop: $.step.parameters.stopSequence || null,
+      top_p: castFloatOrUndefined($.step.parameters.topP),
+      frequency_penalty: castFloatOrUndefined(
+        $.step.parameters.frequencyPenalty
+      ),
+      presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
     };
 
     const { data } = await $.http.post('/v1/completions', payload);
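
For illustration, a minimal sketch of the mapping run($) performs after the conversion; the parameter values and model name below are invented, and castFloatOrUndefined is the helper defined above:

// Illustrative only: invented values, same mapping as run($) above.
const parameters = {
  model: 'my-local-model',
  prompt: 'Say hello',
  temperature: '0.9',
  maxTokens: '',
  stopSequence: '',
};

const payload = {
  model: parameters.model,                                   // 'my-local-model'
  prompt: parameters.prompt,                                 // 'Say hello'
  temperature: castFloatOrUndefined(parameters.temperature), // 0.9
  max_tokens: castFloatOrUndefined(parameters.maxTokens),    // undefined
  stop: parameters.stopSequence || null,                     // null ('' is falsy)
};
// When the body is JSON-serialized, undefined fields drop out, so only
// model, prompt, temperature, and stop (null) reach /v1/completions.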

View File

@@ -1,12 +1,12 @@
-import verifyCredentials from './verify-credentials';
-import isStillVerified from './is-still-verified';
+import verifyCredentials from './verify-credentials.js';
+import isStillVerified from './is-still-verified.js';
 
 export default {
   fields: [
     {
       key: 'screenName',
       label: 'Screen Name',
-      type: 'string' as const,
+      type: 'string',
       required: true,
       readOnly: false,
       value: null,
@@ -18,7 +18,7 @@ export default {
     {
       key: 'apiUrl',
       label: 'API URL',
-      type: 'string' as const,
+      type: 'string',
       required: true,
       readOnly: false,
       value: null,
@@ -29,7 +29,7 @@ export default {
     {
       key: 'apiKey',
       label: 'API Key',
-      type: 'string' as const,
+      type: 'string',
       required: true,
       readOnly: false,
       value: null,

View File

@@ -0,0 +1,6 @@
+const isStillVerified = async ($) => {
+  const r = await $.http.get('/v1/models');
+  return true;
+};
+
+export default isStillVerified;

View File

@@ -1,8 +0,0 @@
-import { IGlobalVariable } from '@automatisch/types';
-
-const isStillVerified = async ($: IGlobalVariable) => {
-  const r = await $.http.get('/v1/models');
-  return true;
-};
-
-export default isStillVerified;

View File

@@ -0,0 +1,5 @@
+const verifyCredentials = async ($) => {
+  await $.http.get('/v1/models');
+};
+
+export default verifyCredentials;

View File

@@ -1,7 +0,0 @@
-import { IGlobalVariable } from '@automatisch/types';
-
-const verifyCredentials = async ($: IGlobalVariable) => {
-  await $.http.get('/v1/models');
-};
-
-export default verifyCredentials;

View File

@@ -1,6 +1,4 @@
-import { TBeforeRequest } from '@automatisch/types';
-
-const addAuthHeader: TBeforeRequest = ($, requestConfig) => {
+const addAuthHeader = ($, requestConfig) => {
   if ($.auth.data?.apiKey) {
     requestConfig.headers.Authorization = `Bearer ${$.auth.data.apiKey}`;
   }

View File

@@ -0,0 +1,9 @@
+const setBaseUrl = ($, requestConfig) => {
+  if ($.auth.data.apiUrl) {
+    requestConfig.baseURL = $.auth.data.apiUrl;
+  }
+
+  return requestConfig;
+};
+
+export default setBaseUrl;
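
Both converted hooks take ($, requestConfig) and hand the config back, so they can run in sequence. A minimal chaining sketch with invented auth data; the reduce is illustrative, not Automatisch's actual dispatcher, and it assumes addAuthHeader also returns the config (its return statement falls outside the diff context above):

// Illustrative only: invented auth data; not Automatisch's real dispatcher.
const $ = {
  auth: { data: { apiUrl: 'http://localhost:8000', apiKey: 'secret' } },
};

const finalConfig = [addAuthHeader, setBaseUrl].reduce(
  (config, hook) => hook($, config),
  { headers: {} }
);
// finalConfig.headers.Authorization === 'Bearer secret'
// finalConfig.baseURL === 'http://localhost:8000'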

View File

@@ -1,11 +0,0 @@
-import { TBeforeRequest } from '@automatisch/types';
-
-const setBaseUrl: TBeforeRequest = ($, requestConfig) => {
-  if ($.auth.data.apiUrl) {
-    requestConfig.baseURL = $.auth.data.apiUrl as string;
-  }
-
-  return requestConfig;
-};
-
-export default setBaseUrl;

View File

@@ -0,0 +1,3 @@
+import listModels from './list-models/index.js';
+
+export default [listModels];

View File

@@ -1,3 +0,0 @@
-import listModels from './list-models';
-
-export default [listModels];

View File

@@ -1,13 +1,11 @@
-import { IGlobalVariable } from '@automatisch/types';
-
 export default {
   name: 'List models',
   key: 'listModels',
 
-  async run($: IGlobalVariable) {
+  async run($) {
     const response = await $.http.get('/v1/models');
 
-    const models = response.data.data.map((model: { id: string }) => {
+    const models = response.data.data.map((model) => {
       return {
         value: model.id,
         name: model.id,

View File

@@ -1,9 +1,9 @@
-import defineApp from '../../helpers/define-app';
-import addAuthHeader from './common/add-auth-header';
-import setBaseUrl from './common/set-base-url';
-import auth from './auth';
-import actions from './actions';
-import dynamicData from './dynamic-data';
+import defineApp from '../../helpers/define-app.js';
+import addAuthHeader from './common/add-auth-header.js';
+import setBaseUrl from './common/set-base-url.js';
+import auth from './auth/index.js';
+import actions from './actions/index.js';
+import dynamicData from './dynamic-data/index.js';
 
 export default defineApp({
   name: 'Self-hosted LLM',
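
The diff is cut off after name. Judging only from the imports above, the converted definition plausibly wires the pieces together along these lines; every field below name is an assumption, not the commit's actual content:

// Hedged sketch: everything after `name` is inferred from the imports above,
// not taken from the commit.
export default defineApp({
  name: 'Self-hosted LLM',
  beforeRequest: [addAuthHeader, setBaseUrl],
  auth,
  actions,
  dynamicData,
});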