This commit is contained in:
duanfuxiang
2025-01-05 11:51:39 +08:00
commit 0c7ee142cb
215 changed files with 20611 additions and 0 deletions

View File

@@ -0,0 +1,610 @@
import { Notice } from "obsidian";
import * as React from "react";
import { useState } from "react";
import {
InfioSettings,
} from '../types/settings';
import { checkForErrors } from "../utils/auto-complete";
import CheckBoxSettingItem from "./components/CheckBoxSettingItem";
import FewShotExampleSettings from "./components/FewShotExampleSettings";
import SettingsItem from "./components/SettingsItem";
import SliderSettingsItem from "./components/SliderSettingsItem";
import TextSettingItem from "./components/TextSettingItem";
import TriggerSettings from "./components/TriggerSettings";
import {
MAX_DELAY,
MAX_FREQUENCY_PENALTY,
MAX_MAX_CHAR_LIMIT,
MAX_MAX_TOKENS,
MAX_PRESENCE_PENALTY,
MAX_TEMPERATURE,
MAX_TOP_P,
MIN_DELAY,
MIN_FREQUENCY_PENALTY,
MIN_MAX_CHAR_LIMIT,
MIN_MAX_TOKENS,
MIN_PRESENCE_PENALTY,
MIN_TEMPERATURE,
MIN_TOP_P
} from "./versions";
interface IProps {
onSettingsChanged(settings: InfioSettings): void;
settings: InfioSettings;
}
export default function AutoCompleteSettings(props: IProps): React.JSX.Element {
const [settings, _setSettings] = useState<InfioSettings>(props.settings);
const errors = checkForErrors(settings);
React.useEffect(() => {
_setSettings(props.settings);
}, [props.settings]);
const updateSettings = (update: Partial<InfioSettings>) => {
_setSettings((settings: InfioSettings) => {
const newSettings = { ...settings, ...update };
props.onSettingsChanged(newSettings);
return newSettings;
});
};
const resetSettings = () => {
// const azureOAIApiSettings = settings.azureOAIApiSettings,
// const openAIApiSettings = settings.openAIApiSettings,
// const ollamaApiSettings = settings.ollamaApiSettings
// const newSettings: SmartCopilotSettings = {
// ...settings
// azureOAIApiSettings,
// openAIApiSettings,
// ollamaApiSettings,
// advancedMode: settings.advancedMode,
// };
// updateSettings(newSettings);
new Notice("Factory reset complete.");
};
// const renderAPISettings = () => {
// if (settings.apiProvider === "azure") {
// return (
// <>
// <TextSettingItem
// name={"Azure OAI API URL"}
// description={
// "The Azure OpenAI services API URL is used in the requests."
// }
// placeholder={"Your API URL..."}
// value={settings.azureOAIApiSettings.url}
// errorMessage={errors.get("azureOAIApiSettings.url")}
// setValue={(value: string) =>
// updateSettings({
// azureOAIApiSettings: {
// ...settings.azureOAIApiSettings,
// url: value,
// },
// })
// }
// />
// <TextSettingItem
// name={"Azure API key"}
// description={
// "The Azure OpenAI services API key used in the requests."
// }
// placeholder={"Your API key..."}
// password
// value={settings.azureOAIApiSettings.key}
// errorMessage={errors.get("azureOAIApiSettings.key")}
// setValue={(value: string) =>
// updateSettings({
// azureOAIApiSettings: {
// ...settings.azureOAIApiSettings,
// key: value,
// },
// })
// }
// />
// <ConnectivityCheck key={"azure"} settings={settings} />
// </>
// );
// }
// if (settings.apiProvider === "openai") {
// return (
// <>
// <TextSettingItem
// name={"OpenAI API URL"}
// description={
// "The URL used in the requests."
// }
// placeholder={"Your API URL..."}
// value={settings.openAIApiSettings.url}
// errorMessage={errors.get("openAIApiSettings.url")}
// setValue={(value: string) =>
// updateSettings({
// openAIApiSettings: {
// ...settings.openAIApiSettings,
// url: value,
// },
// })
// }
// />
// <TextSettingItem
// name={"OpenAI API key"}
// description={"The API key used in the requests."}
// placeholder={"Your API key..."}
// password
// value={settings.openAIApiSettings.key}
// errorMessage={errors.get("openAIApiSettings.key")}
// setValue={(value: string) =>
// updateSettings({
// openAIApiSettings: {
// ...settings.openAIApiSettings,
// key: value,
// },
// })
// }
// />
// <TextSettingItem
// name={"Model"}
// description={"The value of the model parameter in the request body."}
// placeholder="gpt-3.5-turbo"
// value={settings.openAIApiSettings.model}
// setValue={(value: string) =>
// updateSettings({
// openAIApiSettings: {
// ...settings.openAIApiSettings,
// model: value,
// }
// })
// }
// errorMessage={errors.get("openAIApiSettings.model")}
// />
// <ConnectivityCheck key={"openai"} settings={settings} />
// </>
// );
// }
// if (settings.apiProvider === "ollama") {
// return (
// <>
// <TextSettingItem
// name={"API URL"}
// description={
// "The URL used in the requests."
// }
// placeholder={"Your API URL..."}
// value={settings.ollamaApiSettings.url}
// errorMessage={errors.get("ollamaApiSettings.url")}
// setValue={(value: string) =>
// updateSettings({
// ollamaApiSettings: {
// ...settings.ollamaApiSettings,
// url: value,
// },
// })
// }
// />
// <TextSettingItem
// name={"Model"}
// description={"The model you have locally running using OLLAMA."}
// placeholder="Your model name..."
// value={settings.ollamaApiSettings.model}
// setValue={(value: string) =>
// updateSettings({
// ollamaApiSettings: {
// ...settings.ollamaApiSettings,
// model: value,
// }
// })
// }
// errorMessage={errors.get("ollamaApiSettings.model")}
// />
// <ConnectivityCheck key={"openai"} settings={settings} />
// </>
// );
// }
// };
return (
<div>
<h2>AutoComplete</h2>
<CheckBoxSettingItem
name={"Enable"}
description={
"If disabled, nothing will trigger the extension or can result in an API call."
}
enabled={settings.autocompleteEnabled}
setEnabled={(value) => updateSettings({ autocompleteEnabled: value })}
/>
<CheckBoxSettingItem
name={"Cache completions"}
description={
"If disabled, the plugin will not cache the completions. After accepting or rejecting a completion, the plugin will not remember it. This might result in more API calls."
}
enabled={settings.cacheSuggestions}
setEnabled={(value) => updateSettings({ cacheSuggestions: value })}
/>
{/* <DropDownSettingItem
name={"API provider"}
description={
"The plugin supports multiple API providers. Each provider might require different settings."
}
value={settings.apiProvider}
setValue={(value: string) => {
if (value === "openai" || value === "azure" || value === "ollama") {
updateSettings({ apiProvider: value });
}
}}
options={{
openai: "OpenAI API",
azure: "Azure OAI API",
ollama: "Self-hosted OLLAMA API"
}}
errorMessage={errors.get("apiProvider")}
/> */}
<CheckBoxSettingItem
name={"Debug mode"}
description={
"If enabled, various debug messages will be logged to the console, such as the complete response from the API, including the chain of thought tokens."
}
enabled={settings.debugMode}
setEnabled={(value) => updateSettings({ debugMode: value })}
/>
{/* <h2>API</h2>
{renderAPISettings()} */}
<h2>Model Options</h2>
<SliderSettingsItem
name={"Temperature"}
description={
"This parameter affects randomness in the sampling. Lower values result in more repetitive and deterministic responses. Higher temperatures will result in more unexpected or creative responses."
}
value={settings.modelOptions.temperature}
errorMessage={errors.get("modelOptions.temperature")}
setValue={(value: number) =>
updateSettings({
modelOptions: {
...settings.modelOptions,
temperature: value,
},
})
}
min={MIN_TEMPERATURE}
max={MAX_TEMPERATURE}
step={0.05}
/>
<SliderSettingsItem
name={"TopP"}
description={
"Like the temperature parameter, the Top P parameter affects the randomness in sampling. Lowering the value will limit the model's token selection to likelier tokens while increasing the value expands the model's token selection with lower likelihood tokens."
}
value={settings.modelOptions.top_p}
errorMessage={errors.get("modelOptions.top_p")}
setValue={(value: number) =>
updateSettings({
modelOptions: {
...settings.modelOptions,
top_p: value,
},
})
}
min={MIN_TOP_P}
max={MAX_TOP_P}
step={0.05}
/>
{settings.apiProvider !== "ollama" && (<>
<SliderSettingsItem
name={"Frequency Penalty"}
description={
"This parameter reduces the chance of repeating a token proportionally based on how often it has appeared in the text so far. This decreases the likelihood of repeating the exact same text in a response."
}
value={settings.modelOptions.frequency_penalty}
errorMessage={errors.get("modelOptions.frequency_penalty")}
setValue={(value: number) =>
updateSettings({
modelOptions: {
...settings.modelOptions,
frequency_penalty: value,
},
})
}
min={MIN_FREQUENCY_PENALTY}
max={MAX_FREQUENCY_PENALTY}
step={0.05}
/>
<SliderSettingsItem
name={"Presence Penalty"}
description={
"This parameter reduces the chance of repeating any token that has appeared in the text so far. This increases the likelihood of introducing new topics in a response."
}
value={settings.modelOptions.presence_penalty}
errorMessage={errors.get("modelOptions.presence_penalty")}
setValue={(value: number) =>
updateSettings({
modelOptions: {
...settings.modelOptions,
presence_penalty: value,
},
})
}
min={MIN_PRESENCE_PENALTY}
max={MAX_PRESENCE_PENALTY}
step={0.05}
/>
<SliderSettingsItem
name={"Max Tokens"}
description={
"This parameter changes the maximum number of tokens the model is allowed to generate. This includes the chain of thought tokens before the answer."
}
value={settings.modelOptions.max_tokens}
errorMessage={errors.get("modelOptions.max_tokens")}
setValue={(value: number) =>
updateSettings({
modelOptions: {
...settings.modelOptions,
max_tokens: value,
},
})
}
min={MIN_MAX_TOKENS}
max={MAX_MAX_TOKENS}
step={10}
/>
</>)}
<h2>Preprocessing</h2>
<CheckBoxSettingItem
name={"Don't include dataviews"}
description={
"Dataview(js) blocks can be quite long while not providing much value to the AI. If this setting is enabled, data view blocks will be removed promptly to reduce the number of tokens. This could save you some money in the long run."
}
enabled={settings.dontIncludeDataviews}
setEnabled={(value) =>
updateSettings({ dontIncludeDataviews: value })
}
/>
<SliderSettingsItem
name={"Maximum Prefix Length"}
description={
"The maximum number of characters that will be included in the prefix. A larger value will increase the context for the completion, but it can also increase the cost or push you over the token limit."
}
value={settings.maxPrefixCharLimit}
errorMessage={errors.get("maxPrefixCharLimit")}
setValue={(value: number) =>
updateSettings({ maxPrefixCharLimit: value })
}
min={MIN_MAX_CHAR_LIMIT}
max={MAX_MAX_CHAR_LIMIT}
step={100}
suffix={" chars"}
/>
<SliderSettingsItem
name={"Maximum Suffix Length"}
description={
"The maximum number of characters that will be included in the suffix. A larger value will increase the context for the completion, but it can also increase the cost or push you over the token limit."
}
value={settings.maxSuffixCharLimit}
errorMessage={errors.get("maxSuffixCharLimit")}
setValue={(value: number) =>
updateSettings({ maxSuffixCharLimit: value })
}
min={MIN_MAX_CHAR_LIMIT}
max={MAX_MAX_CHAR_LIMIT}
step={100}
suffix={" chars"}
/>
<h2>Postprocessing</h2>
<CheckBoxSettingItem
name={"Auto remove duplicate mat block indicators"}
description={
"The AI model might eagerly add a math block indicator ($), even though the cursor is already inside a math block. If this setting is enabled, the plugin will automatically remove these duplicate indicators from the completion."
}
enabled={settings.removeDuplicateMathBlockIndicator}
setEnabled={(value) =>
updateSettings({ removeDuplicateMathBlockIndicator: value })
}
/>
<CheckBoxSettingItem
name={"Auto remove duplicate mat block indicators"}
description={
"The AI model might eagerly add a code block indicator (`), even though the cursor is already inside a code block. If this setting is enabled, the plugin will automatically remove these duplicate indicators from the completion."
}
enabled={settings.removeDuplicateCodeBlockIndicator}
setEnabled={(value) =>
updateSettings({ removeDuplicateCodeBlockIndicator: value })
}
/>
<h2>Trigger</h2>
<SliderSettingsItem
name={"Delay"}
description={
"Delay in ms between the last character typed and the completion request."
}
value={settings.delay}
errorMessage={errors.get("delay")}
setValue={(value: number) => updateSettings({ delay: value })}
min={MIN_DELAY}
max={MAX_DELAY}
step={100}
suffix={"ms"}
/>
<TriggerSettings
name={"Trigger words"}
description={
"Completions will be triggered if the text before the matches any of these words or characters. This can either be a direct string match or a regex match. When using a regex, make sure to include the end of line character ($)."
}
triggers={settings.triggers}
setValues={(triggers) => updateSettings({ triggers })}
errorMessage={errors.get("triggerWords")}
errorMessages={errors}
/>
<h2>Privacy</h2>
<SettingsItem
name={"Ignored files"}
description={
<div>
<p>This field enables you to specify files and directories that the plugin should ignore. When
you open any of these files, the plugin will automatically disable itself and display a
'disabled' status in the bottom menu. Enter one pattern per line. These patterns function
similar to glob patterns. Here are some frequently used patterns:</p>
<ul>
<li><code>path/to/folder/**</code>: This pattern ignores all files and sub folders within
this folder.
</li>
<li><code>"**/secret/**"</code>: This pattern ignores any file located inside a 'secret'
directory,
regardless of its location in the path.
</li>
<li><code>!path/to/folder/example.md</code>: This pattern explicitly undoes an ignore,
making this file noticeable to the plugin.
</li>
<li><code>**/*Python*.md</code>: This pattern ignores any file with 'Python' in its name,
irrespective of its location.
</li>
</ul>
</div>
}
display={"block"}
errorMessage={errors.get("ignoredFilePatterns")}
>
<textarea
className="infio-autocomplete-setting-item-textarea"
rows={10}
placeholder="Your file patterns, e.g., **/secret/**"
value={settings.ignoredFilePatterns}
onChange={(e) =>
updateSettings({
ignoredFilePatterns: e.target.value
})
}
/>
</SettingsItem>
<SettingsItem
name={"Ignored tags"}
description={
<div>
<p>Files containing any of these tags will be ignored. When you open a file containing a
tag listed here, the plugin will automatically disable itself and display a 'disabled'
status in the bottom menu. Enter one tag per line.
</p>
</div>
}
display={"block"}
errorMessage={errors.get("ignoredTags")}
>
<textarea
className="infio-autocomplete-setting-item-textarea"
rows={10}
placeholder="Your file tags, e.g., secret"
value={settings.ignoredTags}
onChange={(e) =>
updateSettings({
ignoredTags: e.target.value
})
}
/>
</SettingsItem>
<h2>Danger zone</h2>
<SettingsItem
name={"Factory Reset"}
description={
"Messed-up the settings? No worries, press this button! After that, the plugin will go back to the default settings. The URL and API key will remain unchanged."
}
>
<button
aria-label="Reset to default settings"
onClick={resetSettings}
>
Reset
</button>
</SettingsItem>
<CheckBoxSettingItem
name={"Advanced mode"}
description={
"If you are familiar with prompt engineering, you can enable this setting to view the prompt generation and a few shot example settings. Turn off this button. It will not reset your changes; use the factory reset button for that."
}
enabled={settings.advancedMode}
setEnabled={(value) => updateSettings({ advancedMode: value })}
/>
{settings.advancedMode && (
<>
<h2>Advanced</h2>
<TextSettingItem
name={"Chain of thought removal regex"}
description={
"This regex is used to remove the chain of thought tokens from the generated answer. If it is not implemented correctly, the chain of thought tokens will be included in the suggested completion."
}
placeholder={"your regex..."}
value={settings.chainOfThoughRemovalRegex}
errorMessage={errors.get("chainOfThoughRemovalRegex")}
setValue={(value: string) =>
updateSettings({
chainOfThoughRemovalRegex: value,
})
}
/>
<SettingsItem
name={"System Message"}
description={
"This system message gives the models all the context and instructions they need to complete the answer generation tasks. You can edit this message to your liking. If you edit the chain of thought formatting, make sure to update the extract regex and examples accordingly."
}
display={"block"}
errorMessage={errors.get("systemMessage")}
>
<textarea
className="infio-autocomplete-setting-item-textarea"
rows={10}
placeholder="Your system message..."
value={settings.systemMessage}
onChange={(e) =>
updateSettings({
systemMessage: e.target.value,
})
}
/>
</SettingsItem>
<SettingsItem
name={"User Message template"}
description={
"This template defines how the prefix and suffix are formatted to create the user message. You have access to two variables: {{prefix}} and {{suffix}}. If you edit this, make sure to update the examples accordingly."
}
display={"block"}
errorMessage={errors.get("userMessageTemplate")}
>
<textarea
className="infio-autocomplete-setting-item-textarea"
rows={3}
placeholder="{{prefix}}<mask/>{{suffix}}"
value={settings.userMessageTemplate}
onChange={(e) =>
updateSettings({
userMessageTemplate: e.target.value,
})
}
/>
</SettingsItem>
<FewShotExampleSettings
fewShotExamples={settings.fewShotExamples}
name={"Few Shot Examples"}
description={
"The model uses these examples to learn the expected answer format. Not all examples are sent at the same time. We only send the relevant examples, given the current cursor location. For example, the CodeBlock examples are only sent if the cursor is in a code block. If no special context is detected, we send the Text examples. Each context has a default of 2 examples, but you can add or remove examples if there is at least one per context. You can add more examples, but this will increase the inference costs."
}
setFewShotExamples={(value) =>
updateSettings({ fewShotExamples: value })
}
errorMessages={errors}
/>
</>
)}
</div>
);
}

View File

@@ -0,0 +1,31 @@
import React from "react";
import InfioPlugin from "../main";
import { InfioSettings } from "../types/settings";
import ModelsSettings from "./ModelsSettings";
interface CustomSettingsProps {
	plugin: InfioPlugin;
}

/**
 * Root of the React-rendered settings area: a header showing the plugin
 * version plus the model configuration section. Setting changes are
 * persisted through the plugin instance.
 */
const CustomSettings: React.FC<CustomSettingsProps> = ({ plugin }) => {
	// Write updated settings back through the plugin.
	const handleSettingsUpdate = async (newSettings: InfioSettings) => {
		await plugin.setSettings(newSettings);
	};
	const { settings } = plugin;
	return (
		<div>
			<h1 style={{ display: "flex", alignItems: "center", justifyContent: "space-between" }}>
				<div>
					Infio Settings <small>v{settings.version}</small>
				</div>
			</h1>
			<ModelsSettings settings={settings} setSettings={handleSettingsUpdate} />
		</div>
	);
};

export default CustomSettings;

View File

@@ -0,0 +1,93 @@
import React from "react";
export interface DropdownComponentProps {
	name: string;
	description?: string;
	options: string[];
	value: string;
	onChange: (value: string) => void;
}

/** Labelled <select> control; reports the chosen option through `onChange`. */
export const DropdownComponent: React.FC<DropdownComponentProps> = (props) => {
	const { name, description, options, value, onChange } = props;
	const handleSelect = (e: React.ChangeEvent<HTMLSelectElement>) =>
		onChange(e.target.value);
	return (
		<div className="infio-llm-setting-item">
			<div className="infio-llm-setting-item-name">{name}</div>
			{description && <div className="infio-llm-setting-item-description">{description}</div>}
			<select
				value={value}
				onChange={handleSelect}
				className="infio-llm-setting-item-control"
			>
				{options.map((option) => (
					<option key={option} value={option}>
						{option}
					</option>
				))}
			</select>
		</div>
	);
};
export interface TextComponentProps {
name: string;
description?: string;
placeholder: string;
value: string;
type?: string;
onChange: (value: string) => void;
}
export const TextComponent: React.FC<TextComponentProps> = ({
name,
description,
placeholder,
value,
type = "text",
onChange,
}) => (
<div className="infio-llm-setting-item">
<div className="infio-llm-setting-item-name">{name}</div>
{description && <div className="infio-llm-setting-item-description">{description}</div>}
<input
type={type}
className="infio-llm-setting-item-control"
placeholder={placeholder}
value={value}
onChange={(e) => onChange(e.target.value)}
/>
</div>
);
export interface ToggleComponentProps {
	name?: string;
	description?: string;
	value: boolean;
	onChange: (value: boolean) => void;
	disabled?: boolean;
}

/** Labelled on/off switch rendered as a styled checkbox. */
export const ToggleComponent: React.FC<ToggleComponentProps> = (props) => {
	const { name, description, value, onChange, disabled = false } = props;
	const handleToggle = (e: React.ChangeEvent<HTMLInputElement>) =>
		onChange(e.target.checked);
	// The "disabled" class greys out the switch while the input itself is
	// also disabled, so no change events fire.
	const switchClass = `switch ${disabled ? "disabled" : ""}`;
	return (
		<div className="infio-llm-setting-item">
			{name && <div className="infio-llm-setting-item-name">{name}</div>}
			{description && <div className="infio-llm-setting-item-description">{description}</div>}
			<label className={switchClass}>
				<input
					type="checkbox"
					checked={value}
					onChange={handleToggle}
					disabled={disabled}
				/>
				<span className="slider round"></span>
			</label>
		</div>
	);
};

View File

@@ -0,0 +1,205 @@
import { Notice } from "obsidian";
import React, { useState } from "react";
import { CustomLLMModel, ModelProviders } from "../types/llm/model";
import { InfioSettings } from "../types/settings";
import { DropdownComponent, TextComponent, ToggleComponent } from "./FormComponents";
interface ModelRowProps {
	model: CustomLLMModel;
	canDelete: boolean;
	onToggle: (enabled: boolean) => void;
	onDelete: () => void;
}

/**
 * One table row per model: provider, name (tagged for embedding models),
 * an enable toggle, and an optional delete button.
 *
 * NOTE(review): the delete button renders when `canDelete` is FALSE — the
 * flag name looks inverted relative to its use; confirm intent before
 * renaming. Behavior preserved here.
 */
const ModelRow: React.FC<ModelRowProps> = ({ model, canDelete, onToggle, onDelete }) => {
	const label = model.isEmbeddingModel ? `${model.name} [embedding]` : model.name;
	return (
		<tr>
			<td>{model.provider}</td>
			<td>{label}</td>
			<td>
				<ToggleComponent value={model.enabled} onChange={onToggle} />
			</td>
			<td>
				{canDelete ? null : <button onClick={onDelete}>Delete</button>}
			</td>
		</tr>
	);
};
interface ModelFormProps {
	providers: string[];
	onSubmit: (model: CustomLLMModel) => void;
	isEmbeddingModel: boolean;
}

/**
 * Form for registering a custom model. Keeps a draft model in local state
 * and hands it to `onSubmit` once the required fields (name and provider)
 * are filled; otherwise a notice asks the user to complete the form.
 */
const ModelForm: React.FC<ModelFormProps> = ({ providers, onSubmit, isEmbeddingModel }) => {
	const [draft, setDraft] = useState<CustomLLMModel>({
		name: "",
		provider: providers[0] || "",
		baseUrl: "",
		apiKey: "",
		enabled: true,
		isBuiltIn: false,
		isEmbeddingModel,
	});

	// Merge one or more updated fields into the draft model.
	const patch = (fields: Partial<CustomLLMModel>) =>
		setDraft({ ...draft, ...fields });

	const handleSubmit = () => {
		if (!draft.name || !draft.provider) {
			new Notice("Please fill in necessary fields!");
			return;
		}
		onSubmit(draft);
		// Clear the text fields but keep the provider/toggle selections.
		patch({ name: "", baseUrl: "", apiKey: "" });
	};

	return (
		<div className="infio-llm-add-custom-model-form infio-chat-settings-model-container">
			<TextComponent
				name="Model:"
				value={draft.name}
				placeholder="Enter model name"
				onChange={(name) => patch({ name })}
			/>
			<DropdownComponent
				name="Provider:"
				options={providers}
				value={draft.provider}
				onChange={(provider) => patch({ provider })}
			/>
			<TextComponent
				name="BaseURL (optional):"
				value={draft.baseUrl || ""}
				placeholder="https://api.example.com/v1"
				onChange={(baseUrl) => patch({ baseUrl })}
			/>
			<TextComponent
				name="APIKey:"
				value={draft.apiKey || ""}
				placeholder="Enter API key"
				type="password"
				onChange={(apiKey) => patch({ apiKey })}
			/>
			<ToggleComponent
				name="IsEmbedding:"
				value={draft.isEmbeddingModel || false}
				onChange={(isEmbeddingModel) => patch({ isEmbeddingModel })}
			/>
			<button onClick={handleSubmit} className="infio-llm-add-model-button">
				Add Model
			</button>
		</div>
	);
};
interface ModelListProps {
models: CustomLLMModel[];
chatModelKey: string;
applyModelKey: string;
onUpdateModel: (index: number, model: CustomLLMModel) => void;
onDeleteModel: (modelKey: string) => void;
}
const ModelList: React.FC<ModelListProps> = ({
models,
chatModelKey,
applyModelKey,
onUpdateModel,
onDeleteModel,
}) => (
<div className="model-settings-container">
<table className="infio-llm-model-settings-table">
<thead>
<tr>
<th>Provider</th>
<th>Model</th>
<th>Enabled</th>
<th>Delete</th>
</tr>
</thead>
<tbody>
{models.map((model, index) => (
<ModelRow
key={`${model.name}`}
model={model}
canDelete={`${model.name}` in [chatModelKey, applyModelKey]}
onToggle={(enabled) => {
const updatedModel = { ...model, enabled };
onUpdateModel(index, updatedModel);
}}
onDelete={() => onDeleteModel(`${model.name}`)}
/>
))}
</tbody>
</table>
</div>
);
interface ModelsSettingsProps {
	settings: InfioSettings;
	setSettings: (settings: InfioSettings) => void;
}

/**
 * "Models" settings section: lists the active models and offers a
 * collapsible form for adding a custom model. The model list is mirrored in
 * local state for instant UI updates and written through to the plugin
 * settings on every change.
 */
const ModelsSettings: React.FC<ModelsSettingsProps> = ({ settings, setSettings }) => {
	const [isAddModelOpen, setIsAddModelOpen] = useState(false);
	const [activeModels, setActiveModels] = useState(settings.activeModels);

	// Persist a new model list both into the plugin settings and local state.
	const commitModels = (models: CustomLLMModel[]) => {
		setSettings({ ...settings, activeModels: models });
		setActiveModels(models);
	};

	const handleUpdateModel = (index: number, updatedModel: CustomLLMModel) => {
		commitModels(activeModels.map((m, i) => (i === index ? updatedModel : m)));
	};

	const handleAddModel = (newModel: CustomLLMModel) => {
		commitModels([...activeModels, newModel]);
	};

	const handleDeleteModel = (modelKey: string) => {
		commitModels(activeModels.filter((model) => `${model.name}` !== modelKey));
	};

	return (
		<div>
			<h2>Models</h2>
			<div className="infio-llm-chat-setting-title infio-chat-setting-item-container">
				<ModelList
					models={activeModels}
					chatModelKey={settings.chatModelId}
					applyModelKey={settings.applyModelId}
					onUpdateModel={handleUpdateModel}
					onDeleteModel={handleDeleteModel}
				/>
				<div className="infio-llm-add-custom-model">
					<h2 onClick={() => setIsAddModelOpen(!isAddModelOpen)} style={{ cursor: "pointer" }}>
						Add Custom Model {isAddModelOpen ? "▼" : "▶"}
					</h2>
					{isAddModelOpen && (
						<ModelForm
							providers={Object.values(ModelProviders)}
							onSubmit={handleAddModel}
							isEmbeddingModel={false}
						/>
					)}
				</div>
			</div>
		</div>
	);
};

export default ModelsSettings;

855
src/settings/SettingTab.tsx Normal file
View File

@@ -0,0 +1,855 @@
import {
App,
DropdownComponent,
Modal,
PluginSettingTab,
Setting,
TFile,
} from 'obsidian';
import * as React from "react";
import { createRoot } from "react-dom/client";
// import {
// EMBEDDING_MODEL_OPTIONS,
// } from '../constants'
import InfioPlugin from '../main';
import { findFilesMatchingPatterns } from '../utils/glob-utils.ts';
import { getOllamaModels } from '../utils/ollama';
import AutoCompleteSettings from './AutoCompleteSettings.tsx';
import CustomSettings from './CustomSettings';
export class InfioSettingTab extends PluginSettingTab {
plugin: InfioPlugin
constructor(app: App, plugin: InfioPlugin) {
	super(app, plugin)
	// Keep a reference to the plugin so render methods can read and persist settings.
	this.plugin = plugin
}
display(): void {
const { containerEl } = this
containerEl.empty()
this.renderModelsSection(containerEl)
this.renderAPIKeysSection(containerEl)
this.renderDefaultModelSection(containerEl)
this.renderRAGSection(containerEl)
this.renderAutoCompleteSection(containerEl)
}
/** Mounts the React-based model settings UI into a fresh container element. */
renderModelsSection(containerEl: HTMLElement): void {
	const mountPoint = containerEl.createDiv("div")
	const root = createRoot(mountPoint)
	root.render(<CustomSettings plugin={this.plugin} />)
}
renderAPIKeysSection(containerEl: HTMLElement): void {
new Setting(containerEl)
.setHeading()
.setName('API keys')
new Setting(containerEl)
.setName('Infio API key')
.setClass("infio-chat-setting-item-container")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.infioApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
infioApiKey: value,
})
}),
)
new Setting(containerEl)
.setName('Anthropic API key')
.setClass("infio-chat-setting-item-container-append")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.anthropicApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
anthropicApiKey: value,
})
}),
)
new Setting(containerEl)
.setName('Deepseek API key')
.setClass("infio-chat-setting-item-container-append")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.deepseekApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
deepseekApiKey: value,
})
}),
)
new Setting(containerEl)
.setName('OpenAI API key')
.setClass("infio-chat-setting-item-container-append")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.openAIApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
openAIApiKey: value,
})
}),
)
new Setting(containerEl)
.setName('Gemini API key')
.setClass("infio-chat-setting-item-container-append")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.geminiApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
geminiApiKey: value,
})
}),
)
new Setting(containerEl)
.setName('Groq API key')
.setClass("infio-chat-setting-item-container-append")
.addText((text) =>
text
.setPlaceholder('Enter your API key')
.setValue(this.plugin.settings.groqApiKey)
.onChange(async (value) => {
await this.plugin.setSettings({
...this.plugin.settings,
groqApiKey: value,
})
}),
)
}
// Renders the "Default Model" section: dropdowns for the default chat, apply
// and embedding models (built from the enabled entries in `activeModels`),
// provider-specific sub-settings when an Ollama or OpenAI-compatible model is
// selected, and the system-prompt textarea.
renderDefaultModelSection(containerEl: HTMLElement): void {
	new Setting(containerEl).setHeading().setName('Default Model')
	new Setting(containerEl)
		.setName('Default chat model')
		.setClass("infio-chat-setting-item-container")
		.addDropdown((dropdown) =>
			dropdown
				.addOptions(
					// Only enabled, non-embedding models are valid chat models.
					this.plugin.settings.activeModels
						.reduce<Record<string, string>>((acc, option) => {
							if (!option.isEmbeddingModel && option.enabled) {
								acc[option.name] = option.name
							}
							return acc
						}, {}),
				)
				.setValue(this.plugin.settings.chatModelId)
				.onChange(async (value) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						chatModelId: value,
					})
					// Force refresh to show/hide Ollama and OpenAI-compatible settings
					this.display()
				}),
		)
	if (this.plugin.settings.chatModelId === 'ollama') {
		this.renderOllamaChatModelSettings(containerEl)
	}
	if (this.plugin.settings.chatModelId === 'openai-compatible') {
		this.renderOpenAICompatibleChatModelSettings(containerEl)
	}
	new Setting(containerEl)
		.setName('Default apply model')
		.setClass("infio-chat-setting-item-container-append")
		.addDropdown((dropdown) =>
			dropdown
				.addOptions(
					// Same filter as the chat dropdown: enabled non-embedding models.
					this.plugin.settings.activeModels
						.reduce<Record<string, string>>(
							(acc, option) => {
								if (!option.isEmbeddingModel && option.enabled) {
									acc[option.name] = option.name
								}
								return acc
							},
							{},
						),
				)
				.setValue(this.plugin.settings.applyModelId)
				.onChange(async (value) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						applyModelId: value,
					})
					// Force refresh to show/hide Ollama and OpenAI-compatible settings
					this.display()
				}),
		)
	if (this.plugin.settings.applyModelId === 'ollama') {
		this.renderOllamaApplyModelSettings(containerEl)
	}
	if (this.plugin.settings.applyModelId === 'openai-compatible') {
		this.renderOpenAICompatibleApplyModelSettings(containerEl)
	}
	new Setting(containerEl)
		.setName('Default embedding model')
		.setClass("infio-chat-setting-item-container-append")
		.addDropdown((dropdown) =>
			dropdown
				.addOptions(
					// Embedding dropdown: only enabled embedding models.
					this.plugin.settings.activeModels
						.reduce<Record<string, string>>(
							(acc, option) => {
								if (option.isEmbeddingModel && option.enabled) {
									acc[option.name] = option.name
								}
								return acc
							},
							{},
						),
				)
				.setValue(this.plugin.settings.embeddingModelId)
				.onChange(async (value) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						embeddingModelId: value,
					})
					// Force refresh to show/hide Ollama settings
					this.display()
				}),
		)
	// NOTE(review): the chat/apply ids compare with `=== 'ollama'` but the
	// embedding id uses an 'ollama/' prefix check — confirm the id formats
	// are intentionally different.
	if (this.plugin.settings.embeddingModelId.startsWith('ollama/')) {
		this.renderOllamaEmbeddingModelSettings(containerEl)
	}
	new Setting(containerEl)
		.setHeading()
		.setName('System prompt')
		.setDesc('This prompt will be added to the beginning of every chat.')
	new Setting(containerEl)
		.setClass('infio-chat-settings-textarea')
		.addTextArea((text) =>
			text
				.setValue(this.plugin.settings.systemPrompt)
				.onChange(async (value) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						systemPrompt: value,
					})
				}),
		)
}
// Extra settings shown when the default chat model is Ollama: the service
// base URL and a model dropdown populated from the running Ollama instance.
renderOllamaChatModelSettings(containerEl: HTMLElement): void {
	const ollamaContainer = containerEl.createDiv(
		'infio-chat-settings-model-container',
	)
	let modelDropdown: DropdownComponent | null = null // Store reference to the dropdown
	// Base URL Setting
	new Setting(ollamaContainer)
		.setName('Base URL')
		.setClass("infio-chat-setting-item-container-append")
		.setDesc(
			'The API endpoint for your Ollama service (e.g., http://127.0.0.1:11434)',
		)
		.addText((text) => {
			text
				.setPlaceholder('http://127.0.0.1:11434')
				.setValue(this.plugin.settings.ollamaChatModel.baseUrl || '')
				.onChange(async (value) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						ollamaChatModel: {
							...this.plugin.settings.ollamaChatModel,
							baseUrl: value,
						},
					})
					// When the URL changes, re-query the instance so the model
					// dropdown reflects the models available at the new endpoint.
					if (modelDropdown) {
						await this.updateOllamaModelOptions({
							baseUrl: value,
							dropdown: modelDropdown,
							onModelChange: async (model: string) => {
								await this.plugin.setSettings({
									...this.plugin.settings,
									ollamaChatModel: {
										...this.plugin.settings.ollamaChatModel,
										model,
									},
								})
							},
						})
					}
				})
		})
	// Model Setting
	new Setting(ollamaContainer)
		.setName('Model Name')
		.setDesc('Select a model from your Ollama instance')
		.addDropdown(async (dropdown) => {
			// Seed the dropdown with the saved model so it stays selectable
			// even before the live model list has been fetched.
			const currentModel = this.plugin.settings.ollamaChatModel.model
			modelDropdown = dropdown
				.addOption(currentModel, currentModel)
				.setValue(currentModel)
			await this.updateOllamaModelOptions({
				baseUrl: this.plugin.settings.ollamaChatModel.baseUrl,
				dropdown,
				onModelChange: async (model: string) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						ollamaChatModel: {
							...this.plugin.settings.ollamaChatModel,
							model,
						},
					})
				},
			})
		})
}
renderOpenAICompatibleChatModelSettings(containerEl: HTMLElement): void {
	// Extra settings shown when the chat model is an OpenAI-compatible
	// endpoint: base URL, API key and model name, persisted on every edit.
	const container = containerEl.createDiv('infio-chat-settings-model-container')

	new Setting(container)
		.setName('Base URL')
		.setDesc(
			'The API endpoint for your OpenAI-compatible service (e.g., https://api.example.com/v1)',
		)
		.addText((text) => {
			text.setPlaceholder('https://api.example.com/v1')
			text.setValue(
				this.plugin.settings.openAICompatibleChatModel.baseUrl || '',
			)
			text.onChange(async (baseUrl) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleChatModel: {
						...this.plugin.settings.openAICompatibleChatModel,
						baseUrl,
					},
				})
			})
		})

	new Setting(container)
		.setName('API Key')
		.setDesc('Your authentication key for the OpenAI-compatible service')
		.addText((text) => {
			text.setPlaceholder('Enter your API key')
			text.setValue(this.plugin.settings.openAICompatibleChatModel.apiKey || '')
			text.onChange(async (apiKey) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleChatModel: {
						...this.plugin.settings.openAICompatibleChatModel,
						apiKey,
					},
				})
			})
		})

	new Setting(container)
		.setName('Model Name')
		.setDesc(
			'The specific model to use with your service (e.g., llama-3.1-70b, mixtral-8x7b)',
		)
		.addText((text) => {
			text.setPlaceholder('llama-3.1-70b')
			text.setValue(this.plugin.settings.openAICompatibleChatModel.model || '')
			text.onChange(async (model) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleChatModel: {
						...this.plugin.settings.openAICompatibleChatModel,
						model,
					},
				})
			})
		})
}
renderOllamaApplyModelSettings(containerEl: HTMLElement): void {
	// Extra settings shown when the "apply" model is served by Ollama:
	// the service base URL and a dropdown of models fetched from that URL.
	const ollamaContainer = containerEl.createDiv(
		'infio-chat-settings-model-container',
	)
	let modelDropdown: DropdownComponent | null = null // Store reference to the dropdown so the URL field can refresh its options
	// Base URL Setting
	new Setting(ollamaContainer)
		.setName('Base URL')
		.setDesc(
			'The API endpoint for your Ollama service (e.g., http://127.0.0.1:11434)',
		)
		.addText((text) => {
			text
				.setPlaceholder('http://127.0.0.1:11434')
				.setValue(this.plugin.settings.ollamaApplyModel.baseUrl || '')
				.onChange(async (value) => {
					// Persist the new URL first, then re-query the model list against it.
					await this.plugin.setSettings({
						...this.plugin.settings,
						ollamaApplyModel: {
							...this.plugin.settings.ollamaApplyModel,
							baseUrl: value,
						},
					})
					// modelDropdown stays null until the addDropdown callback below has run.
					if (modelDropdown) {
						await this.updateOllamaModelOptions({
							baseUrl: value,
							dropdown: modelDropdown,
							onModelChange: async (model: string) => {
								await this.plugin.setSettings({
									...this.plugin.settings,
									ollamaApplyModel: {
										...this.plugin.settings.ollamaApplyModel,
										model,
									},
								})
							},
						})
					}
				})
		})
	// Model Setting
	new Setting(ollamaContainer)
		.setName('Model Name')
		.setDesc('Select a model from your Ollama instance')
		.addDropdown(async (dropdown) => {
			// Seed the dropdown with the saved model so a value is selected
			// even before the remote model list has been fetched.
			const currentModel = this.plugin.settings.ollamaApplyModel.model
			modelDropdown = dropdown
				.addOption(currentModel, currentModel)
				.setValue(currentModel)
			await this.updateOllamaModelOptions({
				baseUrl: this.plugin.settings.ollamaApplyModel.baseUrl,
				dropdown,
				onModelChange: async (model: string) => {
					await this.plugin.setSettings({
						...this.plugin.settings,
						ollamaApplyModel: {
							...this.plugin.settings.ollamaApplyModel,
							model,
						},
					})
				},
			})
		})
}
renderOpenAICompatibleApplyModelSettings(containerEl: HTMLElement): void {
	// Extra settings shown when the "apply" model is an OpenAI-compatible
	// endpoint: base URL, API key and model name, persisted on every edit.
	const container = containerEl.createDiv('infio-chat-settings-model-container')

	new Setting(container)
		.setName('Base URL')
		.setDesc(
			'The API endpoint for your OpenAI-compatible service (e.g., https://api.example.com/v1)',
		)
		.addText((text) => {
			text.setPlaceholder('https://api.example.com/v1')
			text.setValue(
				this.plugin.settings.openAICompatibleApplyModel.baseUrl || '',
			)
			text.onChange(async (baseUrl) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleApplyModel: {
						...this.plugin.settings.openAICompatibleApplyModel,
						baseUrl,
					},
				})
			})
		})

	new Setting(container)
		.setName('API Key')
		.setDesc('Your authentication key for the OpenAI-compatible service')
		.addText((text) => {
			text.setPlaceholder('Enter your API key')
			text.setValue(
				this.plugin.settings.openAICompatibleApplyModel.apiKey || '',
			)
			text.onChange(async (apiKey) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleApplyModel: {
						...this.plugin.settings.openAICompatibleApplyModel,
						apiKey,
					},
				})
			})
		})

	new Setting(container)
		.setName('Model Name')
		.setDesc(
			'The specific model to use with your service (e.g., llama-3.1-70b, mixtral-8x7b)',
		)
		.addText((text) => {
			text.setPlaceholder('llama-3.1-70b')
			text.setValue(this.plugin.settings.openAICompatibleApplyModel.model || '')
			text.onChange(async (model) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					openAICompatibleApplyModel: {
						...this.plugin.settings.openAICompatibleApplyModel,
						model,
					},
				})
			})
		})
}
renderOllamaEmbeddingModelSettings(containerEl: HTMLElement): void {
	// Extra settings shown when the embedding model is served by Ollama;
	// only the service base URL is configurable here.
	const container = containerEl.createDiv('infio-chat-settings-model-container')

	new Setting(container)
		.setName('Base URL')
		.setDesc(
			'The API endpoint for your Ollama service (e.g., http://127.0.0.1:11434)',
		)
		.addText((text) => {
			text.setPlaceholder('http://127.0.0.1:11434')
			text.setValue(this.plugin.settings.ollamaEmbeddingModel.baseUrl || '')
			text.onChange(async (baseUrl) => {
				await this.plugin.setSettings({
					...this.plugin.settings,
					ollamaEmbeddingModel: {
						...this.plugin.settings.ollamaEmbeddingModel,
						baseUrl,
					},
				})
			})
		})
}
renderRAGSection(containerEl: HTMLElement): void {
	// RAG (retrieval-augmented generation) options: which files get indexed,
	// how they are chunked, and how retrieved results are selected.
	new Setting(containerEl).setHeading().setName('RAG')
	new Setting(containerEl)
		.setName('Include patterns')
		.setDesc(
			'If any patterns are specified, ONLY files matching at least one pattern will be included in indexing. One pattern per line. Uses glob patterns (e.g., "notes/*", "*.md"). Leave empty to include all files not excluded by exclude patterns. After changing this, use the command "Rebuild entire vault index" to apply changes.',
		)
		.addButton((button) =>
			button.setButtonText('Test patterns').onClick(async () => {
				// Preview which vault files the current include patterns match.
				const patterns = this.plugin.settings.ragOptions.includePatterns
				const includedFiles = await findFilesMatchingPatterns(
					patterns,
					this.plugin.app.vault,
				)
				new IncludedFilesModal(this.app, includedFiles, patterns).open()
			}),
		)
	new Setting(containerEl)
		.setClass('infio-chat-settings-textarea')
		.addTextArea((text) =>
			text
				.setValue(this.plugin.settings.ragOptions.includePatterns.join('\n'))
				.onChange(async (value) => {
					// One glob pattern per line; blank lines are dropped.
					const patterns = value
						.split('\n')
						.map((p) => p.trim())
						.filter((p) => p.length > 0)
					await this.plugin.setSettings({
						...this.plugin.settings,
						ragOptions: {
							...this.plugin.settings.ragOptions,
							includePatterns: patterns,
						},
					})
				}),
		)
	new Setting(containerEl)
		.setName('Exclude patterns')
		.setDesc(
			'Files matching ANY of these patterns will be excluded from indexing. One pattern per line. Uses glob patterns (e.g., "private/*", "*.tmp"). Leave empty to exclude nothing. After changing this, use the command "Rebuild entire vault index" to apply changes.',
		)
		.addButton((button) =>
			button.setButtonText('Test patterns').onClick(async () => {
				// Preview which vault files the current exclude patterns match.
				const patterns = this.plugin.settings.ragOptions.excludePatterns
				const excludedFiles = await findFilesMatchingPatterns(
					patterns,
					this.plugin.app.vault,
				)
				new ExcludedFilesModal(this.app, excludedFiles).open()
			}),
		)
	new Setting(containerEl)
		.setClass('infio-chat-settings-textarea')
		.addTextArea((text) =>
			text
				.setValue(this.plugin.settings.ragOptions.excludePatterns.join('\n'))
				.onChange(async (value) => {
					// One glob pattern per line; blank lines are dropped.
					const patterns = value
						.split('\n')
						.map((p) => p.trim())
						.filter((p) => p.length > 0)
					await this.plugin.setSettings({
						...this.plugin.settings,
						ragOptions: {
							...this.plugin.settings.ragOptions,
							excludePatterns: patterns,
						},
					})
				}),
		)
	new Setting(containerEl)
		.setName('Chunk size')
		.setDesc(
			'Set the chunk size for text splitting. After changing this, please re-index the vault using the "Rebuild entire vault index" command.',
		)
		.addText((text) =>
			text
				.setPlaceholder('1000')
				.setValue(String(this.plugin.settings.ragOptions.chunkSize))
				.onChange(async (value) => {
					// Non-numeric input is silently ignored; the stored value is kept.
					const chunkSize = parseInt(value, 10)
					if (!isNaN(chunkSize)) {
						await this.plugin.setSettings({
							...this.plugin.settings,
							ragOptions: {
								...this.plugin.settings.ragOptions,
								chunkSize,
							},
						})
					}
				}),
		)
	new Setting(containerEl)
		.setName('Threshold tokens')
		.setDesc(
			'Maximum number of tokens before switching to RAG. If the total tokens from mentioned files exceed this, RAG will be used instead of including all file contents.',
		)
		.addText((text) =>
			text
				.setPlaceholder('8192')
				.setValue(String(this.plugin.settings.ragOptions.thresholdTokens))
				.onChange(async (value) => {
					// Non-numeric input is silently ignored; the stored value is kept.
					const thresholdTokens = parseInt(value, 10)
					if (!isNaN(thresholdTokens)) {
						await this.plugin.setSettings({
							...this.plugin.settings,
							ragOptions: {
								...this.plugin.settings.ragOptions,
								thresholdTokens,
							},
						})
					}
				}),
		)
	new Setting(containerEl)
		.setName('Minimum similarity')
		.setDesc(
			'Minimum similarity score for RAG results. Higher values return more relevant but potentially fewer results.',
		)
		.addText((text) =>
			text
				.setPlaceholder('0.0')
				.setValue(String(this.plugin.settings.ragOptions.minSimilarity))
				.onChange(async (value) => {
					// Non-numeric input is silently ignored; the stored value is kept.
					const minSimilarity = parseFloat(value)
					if (!isNaN(minSimilarity)) {
						await this.plugin.setSettings({
							...this.plugin.settings,
							ragOptions: {
								...this.plugin.settings.ragOptions,
								minSimilarity,
							},
						})
					}
				}),
		)
	new Setting(containerEl)
		.setName('Limit')
		.setDesc(
			'Maximum number of RAG results to include in the prompt. Higher values provide more context but increase token usage.',
		)
		.addText((text) =>
			text
				.setPlaceholder('10')
				.setValue(String(this.plugin.settings.ragOptions.limit))
				.onChange(async (value) => {
					// Non-numeric input is silently ignored; the stored value is kept.
					const limit = parseInt(value, 10)
					if (!isNaN(limit)) {
						await this.plugin.setSettings({
							...this.plugin.settings,
							ragOptions: {
								...this.plugin.settings.ragOptions,
								limit,
							},
						})
					}
				}),
		)
}
renderAutoCompleteSection(containerEl: HTMLElement): void {
	// Mounts the React-based auto-complete settings UI into this settings tab.
	const div = containerEl.createDiv("div");
	const sections = createRoot(div);
	sections.render(
		<React.StrictMode>
			<AutoCompleteSettings
				onSettingsChanged={async (settings) => {
					// Await the write so persistence failures reject this callback
					// instead of becoming an unobserved floating promise.
					await this.plugin.setSettings(settings);
				}}
				settings={this.plugin.settings}
			/>
		</React.StrictMode>
	);
}
/**
 * Re-populates `dropdown` with the models reported by the Ollama server at
 * `baseUrl`, keeping the current selection when it is still available and
 * persisting a new selection (via `onModelChange`) when it is not.
 */
private async updateOllamaModelOptions({
	baseUrl,
	dropdown,
	onModelChange,
}: {
	baseUrl: string
	dropdown: DropdownComponent
	onModelChange: (model: string) => Promise<void>
}): Promise<void> {
	// Remember the selection before wiping the options so it can be restored.
	const currentValue = dropdown.getValue()
	dropdown.selectEl.empty()
	try {
		const models = await getOllamaModels(baseUrl)
		if (models.length > 0) {
			const modelOptions = models.reduce<Record<string, string>>(
				(acc, model) => {
					acc[model] = model
					return acc
				},
				{},
			)
			dropdown.addOptions(modelOptions)
			if (models.includes(currentValue)) {
				dropdown.setValue(currentValue)
			} else {
				// Previous selection no longer exists; fall back to the first
				// reported model and persist that choice.
				dropdown.setValue(models[0])
				await onModelChange(models[0])
			}
		} else {
			dropdown.addOption('', 'No models found - check base URL')
			dropdown.setValue('')
			await onModelChange('')
		}
	} catch (error) {
		// Fetch/parse failure is treated the same as "no models reported".
		console.error('Failed to fetch Ollama models:', error)
		dropdown.addOption('', 'No models found - check base URL')
		dropdown.setValue('')
		await onModelChange('')
	}
	// NOTE(review): this method runs again on every base-URL keystroke; if
	// DropdownComponent.onChange adds a listener rather than replacing the
	// previous one, change handlers will stack up — confirm against the
	// Obsidian API.
	dropdown.onChange(async (value) => {
		await onModelChange(value)
	})
}
}
/** Modal listing every vault file excluded from indexing by the RAG patterns. */
class ExcludedFilesModal extends Modal {
	constructor(
		app: App,
		private readonly files: TFile[],
	) {
		super(app)
	}

	onOpen() {
		this.contentEl.empty()
		this.titleEl.setText(`Excluded Files (${this.files.length})`)
		if (this.files.length === 0) {
			this.contentEl.createEl('p', { text: 'No files match the exclusion patterns' })
			return
		}
		const list = this.contentEl.createEl('ul')
		for (const file of this.files) {
			list.createEl('li', { text: file.path })
		}
	}

	onClose() {
		this.contentEl.empty()
	}
}
/** Modal listing the vault files matched by the RAG include patterns. */
class IncludedFilesModal extends Modal {
	constructor(
		app: App,
		private readonly files: TFile[],
		private readonly patterns: string[],
	) {
		super(app)
	}

	onOpen() {
		this.contentEl.empty()
		this.titleEl.setText(`Included Files (${this.files.length})`)
		if (this.patterns.length === 0) {
			this.contentEl.createEl('p', {
				text: 'No inclusion patterns specified - all files will be included (except those matching exclusion patterns)',
			})
			return
		}
		if (this.files.length === 0) {
			this.contentEl.createEl('p', {
				text: 'No files match the inclusion patterns',
			})
			return
		}
		const list = this.contentEl.createEl('ul')
		for (const file of this.files) {
			list.createEl('li', { text: file.path })
		}
	}

	onClose() {
		this.contentEl.empty()
	}
}

View File

@@ -0,0 +1,32 @@
import * as React from "react";
import SettingsItem from "./SettingsItem";
interface IProps {
  name: string;
  description: string;
  setEnabled(value: boolean): void;
  enabled: boolean;
}

/**
 * Obsidian-styled toggle row. The wrapping div carries the
 * checkbox-container / is-enabled classes that drive the toggle styling.
 */
export default function CheckBoxSettingItem(props: IProps): React.JSX.Element {
  const { enabled, setEnabled, name, description } = props;
  const containerClass = enabled
    ? "checkbox-container is-enabled"
    : "checkbox-container";
  const handleToggle = () => setEnabled(!enabled);
  return (
    <SettingsItem name={name} description={description}>
      <div onClick={handleToggle} className={containerClass}>
        <input type="checkbox" tabIndex={0} />
      </div>
    </SettingsItem>
  );
}

View File

@@ -0,0 +1,43 @@
import * as React from "react";
import SettingsItem from "./SettingsItem";
interface IProps {
  name: string;
  description: string;
  value: string;
  setValue(value: string): void;
  options: { [key: string]: string };
  errorMessage?: string;
  disabled?: boolean;
}

/** Setting row wrapping a native select; `options` maps option value -> label. */
export default function DropDownSettingItem(props: IProps): React.JSX.Element {
  const { name, description, errorMessage, value, setValue, options, disabled } = props;
  const handleChange = (event: React.ChangeEvent<HTMLSelectElement>) => {
    setValue(event.target.value);
  };
  return (
    <SettingsItem
      name={name}
      description={description}
      errorMessage={errorMessage}
    >
      <select
        className="dropdown"
        value={value}
        onChange={handleChange}
        disabled={disabled}
      >
        {Object.entries(options).map(([optionKey, label]) => (
          <option key={optionKey} value={optionKey}>
            {label}
          </option>
        ))}
      </select>
    </SettingsItem>
  );
}

View File

@@ -0,0 +1,233 @@
import { Notice } from "obsidian";
import * as React from "react";
import Context from "../../core/autocomplete/context-detection";
import { FewShotExample } from "../versions";
interface IProps {
  name: string;
  description: string;
  // Validation messages keyed by path, e.g. "fewShotExamples.0.input".
  errorMessages: Map<string, string>;
  fewShotExamples: FewShotExample[];
  setFewShotExamples(fewShotExamples: FewShotExample[]): void;
}

/**
 * Editable list of few-shot examples (context + human message + assistant
 * message) used to steer the auto-complete model. All edits are reported
 * upward via setFewShotExamples; this component holds no state of its own.
 */
export default function FewShotExampleSettings(
  props: IProps
): React.JSX.Element {
  // Remove the example at `index` without mutating the original array.
  const onClickRemoveButton = (index: number) => {
    return () => {
      const newFewShotExamples = props.fewShotExamples
        .slice(0, index)
        .concat(props.fewShotExamples.slice(index + 1));
      props.setFewShotExamples(newFewShotExamples);
    };
  };
  // Prepend a placeholder example for the user to edit.
  const onClickAddButton = () => {
    const newFewShotExamples = [
      {
        context: Context.Text,
        input: "TODO",
        answer: "Thought: TODO\nAnswer: TODO",
      },
      ...props.fewShotExamples,
    ];
    props.setFewShotExamples(newFewShotExamples);
  };
  // Update the context of example `index`; rejects values that are not a
  // known Context with a user-visible Notice.
  const onChangeContext = (index: number) => {
    return (e: React.ChangeEvent<HTMLSelectElement>) => {
      const newFewShotExamples = [...props.fewShotExamples];
      const context = Context.get(e.target.value);
      if (context === undefined) {
        new Notice("Invalid context");
        return;
      }
      newFewShotExamples[index] = {
        ...newFewShotExamples[index],
        context,
      };
      props.setFewShotExamples(newFewShotExamples);
    };
  };
  // Update the human-message text of example `index`.
  const onChangeInput = (index: number) => {
    return (e: React.ChangeEvent<HTMLTextAreaElement>) => {
      const newFewShotExamples = [...props.fewShotExamples];
      newFewShotExamples[index] = {
        ...newFewShotExamples[index],
        input: e.target.value,
      };
      props.setFewShotExamples(newFewShotExamples);
    };
  };
  // Update the assistant-message text of example `index`.
  const onAnswerInput = (index: number) => {
    return (e: React.ChangeEvent<HTMLTextAreaElement>) => {
      const newFewShotExamples = [...props.fewShotExamples];
      newFewShotExamples[index] = {
        ...newFewShotExamples[index],
        answer: e.target.value,
      };
      props.setFewShotExamples(newFewShotExamples);
    };
  };
  return (
    <div>
      <div className="setting-item" style={{ display: "flex" }}>
        <div className="setting-item-info">
          <div className="setting-item-name">{props.name}</div>
          <div className="setting-item-description">
            {props.description}
          </div>
          {props.errorMessages.get("fewShotExamples") !== undefined && (
            <div className="setting-item-description ">
              <span className={"mod-warning"}>
                {props.errorMessages.get("fewShotExamples")}
              </span>
            </div>
          )}
        </div>
        <div style={{ top: 0, right: 0 }}>
          <span
            className="clickable-icon setting-editor-extra-setting-button"
            aria-label="Add"
            onClick={onClickAddButton}
          >
            <svg
              xmlns="http://www.w3.org/2000/svg"
              width="24"
              height="24"
              viewBox="0 0 24 24"
              fill="none"
              stroke="currentColor"
              strokeWidth="2"
              strokeLinecap="round"
              strokeLinejoin="round"
              className="svg-icon lucide-plus"
            >
              <line x1="12" y1="5" x2="12" y2="19"></line>
              <line x1="5" y1="12" x2="19" y2="12"></line>
            </svg>
          </span>
        </div>
      </div>
      {props.fewShotExamples.map((example, index) => (
        <div
          key={`few-shot-example-${index}`}
          style={{
            borderBottom:
              "1px solid var(--background-modifier-border)",
          }}
        >
          <div className="setting-item" style={{ display: "flex" }}>
            <div className="setting-item-info">
              <div className="setting-item-name">
                Example {index + 1}
              </div>
            </div>
            <div style={{ top: 0, right: 0 }}>
              <span
                className="clickable-icon setting-editor-extra-setting-button"
                aria-label="Remove"
                onClick={onClickRemoveButton(index)}
              >
                <svg
                  xmlns="http://www.w3.org/2000/svg"
                  width="24"
                  height="24"
                  viewBox="0 0 24 24"
                  fill="none"
                  stroke="currentColor"
                  strokeWidth="2"
                  strokeLinecap="round"
                  strokeLinejoin="round"
                  className="svg-icon lucide-x"
                >
                  <line x1="18" y1="6" x2="6" y2="18"></line>
                  <line x1="6" y1="6" x2="18" y2="18"></line>
                </svg>
              </span>
            </div>
          </div>
          <div
            className="setting-item"
            style={{ display: "block", borderTop: 0 }}
          >
            <div className="setting-item-control">
              <div
                className="setting-item-name"
                style={{ textAlign: "left", width: "100%" }}
              >
                Context
              </div>
              <select
                className="dropdown"
                value={example.context}
                onChange={onChangeContext(index)}
              >
                {Context.values().map((key) => (
                  <option key={key} value={key}>
                    {key}
                  </option>
                ))}
              </select>
            </div>
            <div
              className="setting-item-control"
              style={{ display: "block" }}
            >
              <div
                className="setting-item-name"
                style={{ width: "100%", textAlign: "left" }}
              >
                Human Message
              </div>
              {props.errorMessages.get(`fewShotExamples.${index}.input`) !== undefined && (
                <div className="setting-item-description" style={{ width: "100%", textAlign: "left" }}>
                  <span className={"mod-warning"}>
                    {props.errorMessages.get(`fewShotExamples.${index}.input`)}
                  </span>
                </div>
              )}
              <textarea
                className="infio-autocomplete-setting-item-textarea"
                rows={5}
                style={{ width: "100%" }}
                value={example.input}
                onChange={onChangeInput(index)}
              />
              <div
                className="setting-item-name"
                style={{ width: "100%", textAlign: "left" }}
              >
                Assistant Message
              </div>
              {props.errorMessages.get(`fewShotExamples.${index}.answer`) !== undefined && (
                <div className="setting-item-description" style={{ width: "100%", textAlign: "left" }}>
                  <span className={"mod-warning"}>
                    {props.errorMessages.get(`fewShotExamples.${index}.answer`)}
                  </span>
                </div>
              )}
              <textarea
                className="infio-autocomplete-setting-item-textarea"
                rows={5}
                style={{ width: "100%" }}
                value={example.answer}
                onChange={onAnswerInput(index)}
              />
            </div>
          </div>
        </div>
      ))}
    </div>
  );
}

View File

@@ -0,0 +1,34 @@
import * as React from "react";
interface IProps {
  name: string;
  description: string | React.ReactNode;
  errorMessage?: string;
  children: React.ReactNode;
  display?: "block" | "inline-block" | "flex";
}

/** Shared layout row used by the setting components: info column + control column. */
export default function SettingsItem(props: IProps): React.JSX.Element {
  const { name, description, children, errorMessage, display = "flex" } = props;
  const hasError = errorMessage !== undefined;
  return (
    <div className="setting-item" style={{ display }}>
      <div className="setting-item-info">
        <div className="setting-item-name">{name}</div>
        <div className="setting-item-description">{description}</div>
        {hasError && (
          <div className="setting-item-description">
            <span className={"mod-warning"}>{errorMessage}</span>
          </div>
        )}
      </div>
      <div className="setting-item-control">{children}</div>
    </div>
  );
}

View File

@@ -0,0 +1,73 @@
import * as React from "react";
import { useRef } from "react";
import SettingsItem from "./SettingsItem";
interface IProps {
  name: string;
  description: string;
  setValue(value: number): void;
  errorMessage?: string;
  // Optional unit suffix appended to the tooltip value (e.g. "ms").
  suffix?: string;
  value: number;
  min: number;
  max: number;
  step: number;
}

/**
 * Range-slider setting row with a floating value tooltip that is shown while
 * the slider is hovered or keyboard-focused.
 */
export default function SliderSettingsItem(props: IProps): React.JSX.Element {
  const { errorMessage } = props;
  // Tooltip visibility tracks both mouse hover and keyboard focus.
  const [isFocused, setIsFocused] = React.useState<boolean>(false);
  const sliderRef = useRef<HTMLInputElement>(null);
  const onValueChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    let value = Number(e.target.value);
    if (isNaN(value)) {
      return;
    }
    // Clamp to the configured bounds before reporting the change.
    value = Math.min(props.max, Math.max(props.min, value));
    props.setValue(value);
  };
  const displayValue = props.value + (props.suffix ? props.suffix : "");
  // NOTE(review): the tooltip top/left are viewport coordinates from
  // getBoundingClientRect — presumably the .tooltip class is fixed-positioned;
  // confirm against the stylesheet.
  return (
    <SettingsItem
      name={props.name}
      description={props.description}
      errorMessage={errorMessage}
    >
      <input
        ref={sliderRef}
        onChange={onValueChange}
        className="slider"
        type="range"
        min={props.min}
        max={props.max}
        step={props.step}
        value={props.value}
        onMouseEnter={() => setIsFocused(true)}
        onMouseLeave={() => setIsFocused(false)}
        onFocus={() => setIsFocused(true)}
        onBlur={() => setIsFocused(false)}
      />
      {isFocused && sliderRef.current !== null && (
        <div
          className="tooltip mod-top"
          style={{
            top: sliderRef.current.getBoundingClientRect().top - 30,
            left:
              sliderRef.current.getBoundingClientRect().left +
              sliderRef.current.getBoundingClientRect().width / 2,
          }}
        >
          {displayValue}
        </div>
      )}
    </SettingsItem>
  );
}

View File

@@ -0,0 +1,50 @@
import * as React from "react";
import SettingsItem from "./SettingsItem";
interface IProps {
name: string;
description: string;
placeholder: string;
setValue(value: string): void;
errorMessage?: string;
value: string;
password?: boolean;
disabled?: boolean;
}
export default function TextSettingItem(props: IProps): React.JSX.Element {
const {
value,
setValue,
password,
placeholder,
name,
description,
errorMessage,
} = props;
const onChange = (e: React.ChangeEvent<HTMLInputElement>) => {
const newValue = e.target.value;
setValue(newValue);
};
return (
<SettingsItem
name={name}
description={description}
errorMessage={errorMessage}
>
<input
type={password ? "password" : "text"}
placeholder={placeholder}
onChange={onChange}
value={value}
disabled={props.disabled}
/>
</SettingsItem>
);
}

View File

@@ -0,0 +1,169 @@
import * as React from "react";
import { Trigger } from "../versions";
interface IProps {
  name: string;
  description: string;
  triggers: Trigger[];
  setValues(value: Trigger[]): void;
  errorMessage?: string;
  // Validation messages keyed by path, e.g. "triggers.0.value".
  errorMessages: Map<string, string>;
}

/**
 * Editable list of auto-complete triggers. Each row has a type dropdown
 * (string/regex) and a value input; rows can be added and removed. Tabs and
 * newlines are shown escaped (\t, \n) in the input and stored decoded.
 */
function TriggerSettings(props: IProps): React.JSX.Element {
  const { name, triggers, description, setValues } = props;
  // Prepend a placeholder trigger for the user to edit.
  const onClickAddButton = () => {
    setValues([{ value: "TODO...", type: "string" }, ...triggers]);
  };
  // Remove the trigger at `index` without mutating the original array.
  const onClickRemoveButton = (index: number) => {
    return () => {
      const newTriggers = triggers
        .slice(0, index)
        .concat(triggers.slice(index + 1));
      setValues(newTriggers);
    };
  };
  // Switch a trigger between "string" and "regex"; other values are ignored.
  const onChangeType = (index: number) => {
    return (e: React.ChangeEvent<HTMLSelectElement>) => {
      if (e.target.value === "regex" || e.target.value === "string") {
        const value = triggers[index].value;
        const newTriggers = [...triggers];
        newTriggers[index] = { type: e.target.value as Trigger["type"], value };
        setValues(newTriggers);
      }
    };
  };
  // Update a trigger's value, decoding the escaped \t and \n the input shows.
  const onChangeValue = (index: number) => {
    return (e: React.ChangeEvent<HTMLInputElement>) => {
      const type = triggers[index].type;
      const newTriggers = [...triggers];
      newTriggers[index] = { type, value: decodeHiddenCharacters(e.target.value) };
      setValues(newTriggers);
    };
  };
  return (
    <>
      <div className="setting-item">
        <div className="setting-item-info">
          <div className="setting-item-name">{name}</div>
          <div className="setting-item-description">
            {description}
          </div>
          {props.errorMessages.get("triggerWords") !== undefined && (
            <div className="setting-item-description ">
              <span className={"mod-warning"}>
                {props.errorMessages.get("triggerWords")}
              </span>
            </div>
          )}
        </div>
        <div className="setting-item-control">
          <div
            className="clickable-icon setting-editor-extra-setting-button"
            aria-label="Add"
            onClick={onClickAddButton}
          >
            <svg
              xmlns="http://www.w3.org/2000/svg"
              width="24"
              height="24"
              viewBox="0 0 24 24"
              fill="none"
              stroke="currentColor"
              strokeWidth="2"
              strokeLinecap="round"
              strokeLinejoin="round"
              className="svg-icon lucide-plus"
            >
              <line x1="12" y1="5" x2="12" y2="19"></line>
              <line x1="5" y1="12" x2="19" y2="12"></line>
            </svg>
          </div>
        </div>
      </div>
      {triggers.map((trigger: Trigger, index: number) => (
        <div
          className="infio-autocomplete-setting-list-item"
          key={`setting-list-item-${name.replace(" ", "-")}-${index}`}
        >
          {(props.errorMessages.get(`triggers.${index}.value`) !== undefined || props.errorMessages.get(`triggers.${index}.type`) !== undefined) && (
            <div className="setting-item-description" style={{ width: "100%", textAlign: "left" }}>
              {props.errorMessages.get(`triggers.${index}.value`) !== undefined && (
                <span className={"mod-warning"}>
                  {props.errorMessages.get(`triggers.${index}.value`)}
                </span>
              )}
              {props.errorMessages.get(`triggers.${index}.type`) !== undefined && (
                <span className={"mod-warning"}>
                  {props.errorMessages.get(`triggers.${index}.type`)}
                </span>
              )}
            </div>
          )}
          <div className="setting-item-info">
            <div className="setting-item-control">
              <select
                className="dropdown"
                value={trigger.type}
                onChange={onChangeType(index)}
              >
                <option value={"string"}>
                  string
                </option>
                <option value={"regex"}>
                  regex
                </option>
              </select>
              <input
                style={{ whiteSpace: "pre-wrap" }}
                type="text"
                placeholder={"TODO..."}
                value={encodeHiddenCharacters(trigger.value)}
                onChange={onChangeValue(index)}
              />
              <div
                className="clickable-icon setting-editor-extra-setting-button"
                aria-label="Remove"
                onClick={onClickRemoveButton(index)}
              >
                <svg
                  xmlns="http://www.w3.org/2000/svg"
                  width="24"
                  height="24"
                  viewBox="0 0 24 24"
                  fill="none"
                  stroke="currentColor"
                  strokeWidth="2"
                  strokeLinecap="round"
                  strokeLinejoin="round"
                  className="svg-icon lucide-x"
                >
                  <line x1="18" y1="6" x2="6" y2="18"></line>
                  <line x1="6" y1="6" x2="18" y2="18"></line>
                </svg>
              </div>
            </div>
          </div>
        </div>
      ))}
    </>
  );
}
/**
 * Display-encode control characters so every tab/newline is visible and
 * editable as "\t"/"\n" in the trigger text input.
 * Inverse of decodeHiddenCharacters.
 */
function encodeHiddenCharacters(value: string) {
    // String.replace with a string pattern only replaces the FIRST match;
    // use global regexes so all occurrences are encoded.
    return value.replace(/\t/g, "\\t").replace(/\n/g, "\\n");
}
/**
 * Decode the "\t"/"\n" escape sequences shown in the trigger text input back
 * into real tab/newline characters. Inverse of encodeHiddenCharacters.
 */
function decodeHiddenCharacters(value: string) {
    // String.replace with a string pattern only replaces the FIRST match;
    // use global regexes so all occurrences are decoded.
    return value.replace(/\\t/g, "\t").replace(/\\n/g, "\n");
}
export default TriggerSettings;

View File

@@ -0,0 +1,11 @@
export type { FewShotExample } from "./shared";
export { DEFAULT_AUTOCOMPLETE_SETTINGS as DEFAULT_SETTINGS, settingsSchema, pluginDataSchema } from "./v1/v1";
export type { Settings, Trigger, PluginData } from "./v1/v1";
export {
MAX_DELAY, MAX_FREQUENCY_PENALTY,
MAX_MAX_CHAR_LIMIT,
MAX_MAX_TOKENS, MAX_PRESENCE_PENALTY, MAX_TEMPERATURE, MAX_TOP_P,
MIN_DELAY, MIN_FREQUENCY_PENALTY,
MIN_MAX_CHAR_LIMIT,
MIN_MAX_TOKENS, MIN_PRESENCE_PENALTY, MIN_TEMPERATURE, MIN_TOP_P
} from "./shared";

View File

@@ -0,0 +1,73 @@
import { cloneDeep, get, has, set } from "lodash";
import { findEqualPaths, isRegexValid } from "../../utils/auto-complete";
import {
DEFAULT_SETTINGS as DEFAULT_SETTINGS_V0,
Settings as SettingsV0,
Trigger,
settingsSchema as settingsSchemaV0
} from "./v0/v0";
import {
DEFAULT_AUTOCOMPLETE_SETTINGS as DEFAULT_SETTINGS_V1,
Settings as SettingsV1,
settingsSchema as settingsSchemaV1
} from "./v1/v1";
/**
 * Migrates persisted v0 auto-complete settings to the v1 shape: refreshes
 * values the user never customized, sanitizes triggers, seeds the fields
 * introduced in v1, and validates the result against the v1 schema
 * (schema.parse throws if the migrated object does not conform).
 */
export function migrateFromV0ToV1(settings: SettingsV0): SettingsV1 {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const updatedSettings: any = cloneDeep(settings);
    // Properties still equal to the v0 defaults are bumped to the v1 defaults.
    migrateDefaultSettings(updatedSettings, DEFAULT_SETTINGS_V0, DEFAULT_SETTINGS_V1);
    updatedSettings.triggers.forEach((trigger: Trigger) => {
        // Check if the trigger type is 'regex' and if its value does not end with '$'
        if (trigger.type === 'regex' && !trigger.value.endsWith('$')) {
            // Append '$' to the trigger value
            trigger.value += '$';
        }
    });
    // Drop empty triggers and regex triggers that no longer compile.
    updatedSettings.triggers = updatedSettings
        .triggers
        .filter((trigger: Trigger) => trigger.value.length > 0)
        .filter((trigger: Trigger) => trigger.type !== 'regex' || isRegexValid(trigger.value));
    // Add the 'version' property with the value '1'
    updatedSettings.version = '1';
    // Fall back to the default chain-of-thought regex if the stored one is invalid.
    if (!isRegexValid(updatedSettings.chainOfThoughRemovalRegex)) {
        updatedSettings.chainOfThoughRemovalRegex = DEFAULT_SETTINGS_V1.chainOfThoughRemovalRegex;
    }
    // Fields introduced in v1 start from their defaults.
    updatedSettings.ignoredFilePatterns = DEFAULT_SETTINGS_V1.ignoredFilePatterns;
    updatedSettings.ignoredTags = DEFAULT_SETTINGS_V1.ignoredTags;
    updatedSettings.cacheSuggestions = DEFAULT_SETTINGS_V1.cacheSuggestions;
    updatedSettings.ollamaApiSettings = DEFAULT_SETTINGS_V1.ollamaApiSettings;
    updatedSettings.debugMode = DEFAULT_SETTINGS_V1.debugMode;
    // Parsing the updated settings to ensure they match the SettingsV1 schema
    return settingsSchemaV1.parse(updatedSettings);
}
/**
 * Copies new default values onto `setting` for every path whose current value
 * still equals the previous version's default (i.e. the user never customized
 * it). Mutates `setting` in place and returns nothing — the declared return
 * type is therefore `void` (the original said `any` despite never returning).
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function migrateDefaultSettings(setting: any, previousDefault: any, currentDefault: any): void {
    const unchangedDefaultProperties = findEqualPaths(setting, previousDefault);
    for (const path of unchangedDefaultProperties) {
        // Only overwrite paths that still exist in the current defaults.
        if (has(currentDefault, path)) {
            set(setting, path, get(currentDefault, path));
        }
    }
}
/** True when `settings` parses cleanly against the v0 settings schema. */
export const isSettingsV0 = (settings: object): boolean =>
    settingsSchemaV0.safeParse(settings).success;
/** True when `settings` parses cleanly against the v1 settings schema. */
export const isSettingsV1 = (settings: object): boolean =>
    settingsSchemaV1.safeParse(settings).success;

View File

@@ -0,0 +1,57 @@
import { z } from "zod";
// Shared min/max bounds used both by the zod settings schema below and by the
// sliders/validation messages in the settings UI.
export const MIN_DELAY = 0;
export const MAX_DELAY = 2000;
export const MIN_MAX_CHAR_LIMIT = 100;
export const MAX_MAX_CHAR_LIMIT = 10000;
export const MIN_MAX_TOKENS = 128;
export const MAX_MAX_TOKENS = 8192;
// Model sampling parameter bounds.
export const MIN_TEMPERATURE = 0;
export const MAX_TEMPERATURE = 1;
export const MIN_TOP_P = 0;
export const MAX_TOP_P = 1;
export const MIN_FREQUENCY_PENALTY = 0;
export const MAX_FREQUENCY_PENALTY = 2;
export const MIN_PRESENCE_PENALTY = 0;
export const MAX_PRESENCE_PENALTY = 2;
// Azure OpenAI connection settings; url must be a valid URL or empty (unset).
export const azureOAIApiSettingsSchema = z.object({
    key: z.string(),
    url: z.string().url().or(z.string().max(0)),
}).strict();
// OpenAI-compatible API connection settings; url must be a valid URL.
export const openAIApiSettingsSchema = z.object({
    key: z.string(),
    url: z.string().url(),
    model: z.string(),
}).strict();
// Ollama connection settings; no key — Ollama is unauthenticated by default here.
export const ollamaApiSettingsSchema = z.object({
    url: z.string().url(),
    model: z.string(),
}).strict();
// Validation schema for the LLM sampling options.
// All bounds reference the shared MIN_/MAX_ constants so the limits and the
// error messages cannot drift apart (previously the bounds were hard-coded
// literals duplicating the constants above).
export const modelOptionsSchema = z.object({
	temperature: z.number()
		.min(MIN_TEMPERATURE, { message: `Temperature must be at least ${MIN_TEMPERATURE}` })
		.max(MAX_TEMPERATURE, { message: `Temperature must be at most ${MAX_TEMPERATURE}` }),
	top_p: z.number()
		// Message fix: .min() is inclusive, so "at least", not "greater than".
		.min(MIN_TOP_P, { message: `top_p must be at least ${MIN_TOP_P}` })
		.max(MAX_TOP_P, { message: `top_p must be at most ${MAX_TOP_P}` }),
	frequency_penalty: z.number()
		.min(MIN_FREQUENCY_PENALTY, { message: `Frequency penalty must be at least ${MIN_FREQUENCY_PENALTY}` })
		.max(MAX_FREQUENCY_PENALTY, { message: `Frequency penalty must be at most ${MAX_FREQUENCY_PENALTY}` }),
	presence_penalty: z.number()
		.min(MIN_PRESENCE_PENALTY, { message: `Presence penalty must be at least ${MIN_PRESENCE_PENALTY}` })
		.max(MAX_PRESENCE_PENALTY, { message: `Presence penalty must be at most ${MAX_PRESENCE_PENALTY}` }),
	max_tokens: z.number().int()
		// Message fix: "at least than" -> "at least".
		.min(MIN_MAX_TOKENS, { message: `max_tokens must be at least ${MIN_MAX_TOKENS}` })
		.max(MAX_MAX_TOKENS, { message: `max_tokens must be at most ${MAX_MAX_TOKENS}` }),
}).strict();
// Validation schema for a single few-shot prompt example (context + masked input
// + expected model answer).
export const fewShotExampleSchema = z.object({
// TODO: figure out how to make this compatible with the context enum and its namespace.
context: z.enum(["Text", "Heading", "BlockQuotes", "UnorderedList", "NumberedList", "CodeBlock", "MathBlock", "TaskList"]),
input: z.string().min(3, { message: "The Input must be at least 3 characters long" }),
answer: z.string().min(3, { message: "The Answer must be at least 3 characters long" }),
}).strict();
// A few-shot example as validated by fewShotExampleSchema.
export type FewShotExample = z.infer<typeof fewShotExampleSchema>;

View File

@@ -0,0 +1,18 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing a Markdown block quote.
// The strings below are prompt data sent verbatim to the model; do not reformat.
const example: FewShotExample = {
context: Context.BlockQuotes,
input: `# Matthew effect
The Matthew effect is named after the following New Testament verse:
> <mask/>
According to Malcolm Gladwell, the Matthew effect means that more successful people are most likely given special attention and opportunities leading to further success. `,
answer: `THOUGHT: The <mask/> is located inside a Markdown quote block, so I should write a quote. The text after <mask/> is about the Matthew effect. The text before <mask/> say the quote is from the New Testament. So my answer should contain something from the New Testament that is about the Matthew effect.
ANSWER: For unto everyone that hath shall be given, and he shall have abundance.
> But from him that hath not shall taken away even that which he hath.
> Matthew 25:29`,
};
export default example;

View File

@@ -0,0 +1,23 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): finishing a JavaScript function body
// inside a fenced code block. Prompt data — strings must stay verbatim.
const messages: FewShotExample = {
context: Context.CodeBlock,
input: `# debounce
A debounce function makes sure that a function is only triggered once per user input. This is useful for event based triggers. You can implement in javascript like this:
\`\`\`javascript
function debounce(func, timeout = 300){
<mask/>
}
\`\`\`
`,
answer: `THOUGHT: The <mask/> is located in JavaScript code block. The text before the <mask/> is describes what a debounce function does, and it defines the function signature. So the answer should not include the function signature to avoid duplication. The <mask/> is inside this function, so the answer should finish the implementation of the function. There is some indentation before the <mask/>, so the answer should be indented as well.
ANSWER:let timer;
return (...args) => {
clearTimeout(timer);
timer = setTimeout(() => { func.apply(this, args); }, timeout);
};`,
};
export default messages;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): filling in the missing parameter list of a
// Python function inside a code block. Prompt data — strings must stay verbatim.
const messages: FewShotExample = {
context: Context.CodeBlock,
input: `\`\`\`python
def fibonacci(<mask/>) -> int:
\tif n == 0 or n == 1:
\t\treturn n
\telse:
\t\treturn fibonacci(n-1) + fibonacci(n-2)
\`\`\`
`,
answer: `THOUGHT: The <mask/> is located in Python code block. Based on the location of </mask> I should write the function's arguments and types. Based on the text after </mask>, I see that the function uses a variable n with type int, which is not yet defined.
ANSWER: n: int`,
};
export default messages;

View File

@@ -0,0 +1,16 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): writing a heading title from scratch based
// on the paragraph that follows it. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Heading,
input: `# <mask/>
A neuron is considered dead if it does not activate for any of the training instance in the training dataset. Because it never activates it will never have a gradient due to the chain rule so it also cannot change anymore. The dead ReLU problem can have due to a wide variety of reasons, such as:
1. Poorly initialized weights.
2. Extremely high learning rates during training.
`,
answer: `THOUGHT: <mask/> is located inside a Markdown headings, so I should write a title. There is no text in the header yet. The text after <mask/> is about the dead ReLU problem, so the title should reflect this.
ANSWER: The dead ReLU problem`,
};
export default example;

View File

@@ -0,0 +1,13 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing a partially written heading.
// Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Heading,
input: `# The Softmax <mask/>
The softmax function transforms a vector into a probability distribution such that the sum of the vector is equal to 1.`,
answer: `THOUGHT: <mask/> is located inside a Markdown headings. The header already contains the text "The Softmax" contains so my answer should be coherent with that. The text after <mask/> is about the softmax function, so the title should reflect this.
ANSWER: function`,
};
export default example;

View File

@@ -0,0 +1,15 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing an inline LaTeX formula.
// NOTE(review): the input ends with a stray ".3 =" after the mask — looks
// unintended; confirm against the v1 version of this example, which drops it.
const messages: FewShotExample = {
context: Context.MathBlock,
input: String.raw`# Logarithm definition
A logarithm is the power to which a base must be raised to yield a given number. For example $2^3 =8$; therefore, 3 is the logarithm of 8 to base 2, or in other words $<mask/>$.3 =
`,
answer: String.raw`THOUGHT: The <mask/> is located inline math block. The text before the mask is about logarithm. The text is giving an example but the math notation still needs to be completed. So my answer should be the latex formula for this example.
ANSWER: 3 = \log_2(8)`,
};
export default messages;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing a display ($$ … $$) math block.
// String.raw keeps the LaTeX backslashes literal. Prompt data — keep verbatim.
const messages: FewShotExample = {
context: Context.MathBlock,
input: String.raw`# Sample mean
The sample mean, or sometime called average, is defined as:
$$
sample\_mean(x) = <mask/>
$$
The average value has the property that 50% of the weighted* value will be above and below it. This weighted property can make it more sensitive to outliers than the median.
`,
answer: String.raw`THOUGHT: The <mask/> is located in math block. Based on the text before and after the mask my answer should be the latex formula for the sample mean.
ANSWER: \frac{1}{n} \sum_i^n x_i`,
};
export default messages;

View File

@@ -0,0 +1,20 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): continuing a numbered list.
const example: FewShotExample = {
context: Context.NumberedList,
input: `# Binary search
Binary is a sorting O(log(n)) sorting algorithm. It works as follows:
1. Ensure you have a sorted array.
2. Check the middle element in the list:
3. Return the index if this is the item you are looking for.
4. <mask/>
`,
// Fixed defects in the answer text: a duplicated "THOUGHT:" prefix and two
// typos ("bout" -> "about", "should the remaining" -> "should be the remaining").
answer: `THOUGHT: The <mask/> is located inside a Markdown list. The current number 4 and it is already written so I should not be in the answer. The text before the <mask/> is about the binary search algorithm and steps in the algorithm but a few steps are missing. There is no text after the <mask/>. So my answer should be the remaining steps of the algorithm.
ANSWER: Go to step 2 with the remaining right half if the item is smaller than the target.
5. Go to step 2 with the remaining left half if the item is larger than the target.
6. If there are no more elements to check, the return indicates that the item is not in the list.`,
};
export default example;

View File

@@ -0,0 +1,26 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing a sub-task in a task list.
// Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.TaskList,
input: ` # Prepare for conference
Before going to a conference, there are a few things to do:
- [ ] Finish presentation
- [ ] Write outline
- [ ] Create slides
- [ ] Practice presentation
- [ ] Book flights
- [ ] Reserve hotel
- [ ] Pack suitcase
- [ ] <mask/>
- [ ] Arrange transportation to airport
`,
answer: `THOUGHT: The <mask/> is located inside a Markdown subtask list. The tasks before and after the <mask/> are about preparing and traveling to a conference. The parent task is about packing a suitcase. The syntax for the subtask is already there, so I don't need to write it. So my answer should finish that subtask. Additionally, the answer can also contain additional subtasks that are not already there.
ANSWER:Clothes
- [ ] Toiletries
- [ ] Travel documents
`,
};
export default example;

View File

@@ -0,0 +1,20 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): appending new items to a task list.
// Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.TaskList,
input: ` # Write blog post about Obsidian
For my Obsidian blog post, I need to do the following:
- [ ] Research about Obsidian.
- [ ] Create an outline for the blog.<mask/>
`,
answer: `THOUGHT: The <mask/> is located inside a Markdown task list. The tasks before the <mask/> are about writing a blog about Obsidian. There is no text after the <mask/>. The task before the <mask/> looks already finished. So my answer should contain additional tasks that are not already there and should be help to complete this blog post.
ANSWER:
- [ ] Gather relevant visual aids.
- [ ] Write the first draft
- [ ] Review and edit the first draft
`,
};
export default example;

View File

@@ -0,0 +1,21 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): continuing a paragraph at the end of a
// sentence, mirroring the structure of a sibling section.
const example: FewShotExample = {
context: Context.Text,
input: `# Locality-sensitive hashing (LSH)
Locality-sensitive hashing (LSH) is an algorithm that hashes similar items into the same buckets with high probability.
## Potential problems
### Collision (AND)
This happens when distant points are hashed into the same bucket. <mask/>
### Split (OR)
Nearby points are hashed into different buckets. This problem can be solved by using multiple hash tables instead of one. Points are candidates neighbors if they are a candidate in any of the hash tables. As a result the false negative rate reduces significantly, while the false positives rate only increase slightly.
`,
// Fixed typos in the answer text: "The text be the" -> "The text before the",
// "similiar" -> "similar", and the duplicated word in "split problem problem".
answer: `THOUGHT: The <mask/> is located in a paragraph at the end of sentence, so the answer should offer a completion of this paragraph. The paragraph is in English so the answer must be in this language. The text before the <mask/> is about Locality-sensitive hashing and its potential problems, so my answer should reflect that. The text after the <mask/> is a similar paragraph about the second problem of Locality-sensitive hashing. So my answer should be about the split problem, so my answer should have a similar structure as this paragraph.
ANSWER:This problem can be solved by having multiple projection/hashes per table, where points are candidates neighbors if they occur in all query bins. As a result the false positive rate reduces significantly, while the false negative rate only increase slightly.`,
};
export default example;

View File

@@ -0,0 +1,13 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): inserting a sentence in the middle of a
// paragraph. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Text,
input: `# Digitizing sound waves
Typical sound waves are complex and consist of multiple waves each with their amplitude, frequency, and phase. When we digitize a sound, we sample the amplitude, the difference compared to the base level, at fixed intervals. This gives a sequence of digital values that can be used to approximate the original sound wave by recreating the pressure changes over time. <mask/> The sampling rate must be chosen correctly, else we cannot represent specific frequencies, or we introduce unintended distortions.`,
answer: `THOUGHT: The paragraph is in English so the answer must be in this language. The text before the <mask/> is about digitizing sound waves. The text after the <mask/> mentions the sampling rate, but it never explains what it is. So the answer should be a sentence that explains what the sampling rate is writing in such a way that it fits with the text before and after the <mask/>.
ANSWER: The chosen interval is known as the sampling rate.`,
};
export default example;

View File

@@ -0,0 +1,24 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): completing an item in an unordered
// pro/con list. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.UnorderedList,
input: `# Relu activation function
The ReLU activation function is a relatively simple non-linear function:
$$
ReLU(x) = max(0, x)
$$
Advantages:
- <mask/>
Disadvantages:
- Dead ReLU problem, whereby specific activation will only output zeros and thus will not have any gradients. This can be computationally wasteful since we still need matrix multiplication.
- Range $[0, \\infty]$ so Exploding Gradients can still be a problem.
`,
answer: `THOUGHT: The <mask/> is located inside a Markdown list. The dash is already there, so I don't need to write it. The text before the <mask/> is about the ReLU activation function, and it looks like this list is about the advantages of the relu function. The text after the <mask/> is about the disadvantages of the ReLU function. So my answer should finish the advantage item and maybe suggest a new advantage.
ANSWER: Computational cheap activations and gradients.
- Vanishing gradient problem is rare, assuming correct weight initialization.`,
};
export default example;

View File

@@ -0,0 +1,18 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../shared";
// Few-shot example (v0 prompt format): inserting the missing items of an
// unordered list (SOLID principles). Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.UnorderedList,
input: `# SOLID
SOLID is a set of design principles from Robert C. Martin. It consists of the following sub-principles:
- S: Single Responsibility Principle: A class should have only one reason to change, meaning it should only have one job or responsibility.<mask/>
- I: Interface Segregation Principle: Clients should not be forced to depend on interfaces they do not use.
- D: Dependency Inversion Principle: High-level modules should not depend on low-level modules, but both should depend on abstractions.
`,
answer: `THOUGHT: The <mask/> is located inside a Markdown list. The text before the list is about SOLID. The list item before the mask is about Single Responsibility Principle. The list items Interface Segregation and Dependency Inversion principles. So the Open-Closed and Liskov Substitution principles are missing. So my answer should contain these list items.
ANSWER:
- O: Open-Closed Principle: Software entities should be open for extension, but closed for modification.
- L: Liskov Substitution Principle: Subtypes must be substitutable for their base types without altering the correctness of the program.`,
};
export default example;

View File

@@ -0,0 +1,144 @@
import { z } from "zod";
import { azureOAIApiSettingsSchema, fewShotExampleSchema, MAX_DELAY, MAX_MAX_CHAR_LIMIT, MIN_DELAY, MIN_MAX_CHAR_LIMIT, modelOptionsSchema, openAIApiSettingsSchema } from "../shared";
import block_qoute_example from "./few-shot-examples/block-qoute-example";
import codeblock_function_completion from "./few-shot-examples/codeblock-function-completion";
import codeblock_function_parameters from "./few-shot-examples/codeblock-function-parameters";
import header_example from "./few-shot-examples/header-example";
import header_example_relu from "./few-shot-examples/header-example-relu";
import math_block_inline from "./few-shot-examples/math-block-inline";
import math_block_multi_line from "./few-shot-examples/math-block-multi-line";
import numbered_list_example from "./few-shot-examples/numbered-list-example";
import sub_task_list_example from "./few-shot-examples/sub-task-list-example";
import task_list_example from "./few-shot-examples/task-list-example";
import text_completion_end from "./few-shot-examples/text-completion-end";
import text_completion_middle from "./few-shot-examples/text-completion-middle";
import unordered_list_pro_and_con_list from "./few-shot-examples/unordered-list-pro-and-con-list";
import unordered_list_solid from "./few-shot-examples/unordered-list-solid";
// A completion trigger: either a literal string suffix or a regular expression
// matched against the text before the cursor.
export const triggerSchema = z.object({
type: z.enum(['string', 'regex']),
value: z.string(),
}).strict();
// Validation schema for the plugin settings (V0 layout).
export const settingsSchema = z.object({
	enabled: z.boolean(),
	advancedMode: z.boolean(),
	apiProvider: z.enum(['azure', 'openai']),
	azureOAIApiSettings: azureOAIApiSettingsSchema,
	openAIApiSettings: openAIApiSettingsSchema,
	triggers: z.array(triggerSchema),
	// Delay before requesting a completion, in milliseconds.
	delay: z.number().int().min(MIN_DELAY, { message: "Delay must be between 0ms and 2000ms" }).max(MAX_DELAY, { message: "Delay must be between 0ms and 2000ms" }),
	modelOptions: modelOptionsSchema,
	systemMessage: z.string().min(3, { message: "System message must be at least 3 characters long" }),
	fewShotExamples: z.array(fewShotExampleSchema),
	userMessageTemplate: z.string().min(3, { message: "User message template must be at least 3 characters long" }),
	// Regex used to strip the THOUGHT part from the model response.
	chainOfThoughRemovalRegex: z.string(),
	dontIncludeDataviews: z.boolean(),
	// TODO: see if we can replace this with tokens in the future.
	maxPrefixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
	// Bug fix: these error messages previously said "prefix" (copy-paste) for the suffix limit.
	maxSuffixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max suffix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max suffix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
	removeDuplicateMathBlockIndicator: z.boolean(),
	removeDuplicateCodeBlockIndicator: z.boolean()
}).strict();
// Top-level shape of the data persisted by the plugin.
export const pluginDataSchema = z.object({
settings: settingsSchema,
}).strict();
// Default plugin settings (V0 layout), grouped to mirror the settings sections.
export const DEFAULT_SETTINGS: Settings = {
	// General settings
	enabled: true,
	advancedMode: false,
	apiProvider: "openai",
	// API settings
	azureOAIApiSettings: {
		key: "",
		url: "",
	},
	openAIApiSettings: {
		key: "",
		url: "https://api.openai.com/v1/chat/completions",
		model: "gpt-3.5-turbo",
	},
	// Trigger settings
	triggers: [
		{ type: "string", value: "# " },
		{ type: "string", value: ". " },
		{ type: "string", value: ": " },
		{ type: "string", value: ", " },
		{ type: "string", value: "! " },
		{ type: "string", value: "? " },
		{ type: "string", value: "`" },
		{ type: "string", value: "' " },
		{ type: "string", value: "= " },
		{ type: "string", value: "$ " },
		{ type: "string", value: "\n" },
		// bullet list
		{ type: "regex", value: "[\\t ]*(\\-|\\*)[\\t ]+$" },
		// numbered list
		{ type: "regex", value: "[\\t ]*[0-9A-Za-z]+\\.[\\t ]+$" },
		// new line with spaces
		{ type: "regex", value: "\\$\\$\\n[\\t ]*$" },
		// markdown multiline code block
		{ type: "regex", value: "```[a-zA-Z0-9]*(\\n\\s*)?$" },
		// task list normal, sub or numbered.
		{ type: "regex", value: "\\s*(-|[0-9]+\\.) \\[.\\]\\s+$" },
	],
	delay: 1000,
	// Request settings
	modelOptions: {
		temperature: 1,
		top_p: 0.1,
		frequency_penalty: 0.25,
		presence_penalty: 0,
		max_tokens: 800,
	},
	// Prompt settings
	systemMessage: `Your job is to predict the most logical text that should be written at the location of the <mask/>.
Your answer can be either code, a single word, or multiple sentences.
Your answer must be in the same language as the text that is already there.
Your response must have the following format:
THOUGHT: here you explain your reasoning of what could be at the location of <mask/>
ANSWER: here you write the text that should be at the location of <mask/>
`,
	// Bug fix: the examples were previously "sorted" with
	// `.sort((a, b) => a.toString().localeCompare(b.toString()))`. Every object
	// stringifies to "[object Object]", so the comparator always returns 0 and the
	// (stable) sort never reorders anything; the misleading no-op call was removed.
	fewShotExamples: [
		block_qoute_example,
		codeblock_function_completion,
		codeblock_function_parameters,
		header_example,
		numbered_list_example,
		sub_task_list_example,
		task_list_example,
		text_completion_end,
		text_completion_middle,
		unordered_list_pro_and_con_list,
		unordered_list_solid,
		math_block_inline,
		math_block_multi_line,
		header_example_relu,
	],
	userMessageTemplate: "{{prefix}}<mask/>{{suffix}}",
	chainOfThoughRemovalRegex: `(.|\\n)*ANSWER:`,
	// Preprocessing settings
	dontIncludeDataviews: true,
	maxPrefixCharLimit: 2000,
	maxSuffixCharLimit: 2000,
	// Postprocessing settings
	removeDuplicateMathBlockIndicator: true,
	removeDuplicateCodeBlockIndicator: true,
};
// Default persisted plugin data: just the default settings.
export const DEFAULT_PLUGIN_DATA: PluginData = {
settings: DEFAULT_SETTINGS,
}
// Settings as accepted on input (before zod parsing applies transformations).
export type Settings = z.input<typeof settingsSchema>;
export type Trigger = z.infer<typeof triggerSchema>;
export type PluginData = z.infer<typeof pluginDataSchema>;

View File

@@ -0,0 +1,18 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format, with LANGUAGE line): completing a Markdown
// block quote. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.BlockQuotes,
input: `# Matthew effect
In the book Outliers, Malcolm Gladwell popularized the term Matthew effect.
This effect is named after the following New Testament verse:
> <mask/>
According to Malcolm Gladwell, the Matthew effect means that more successful people are most likely given special attention and opportunities leading to further success.`,
answer: `THOUGHT: The answer is a biblical verse from Matthew's effect, related to accumulating advantage; Gladwell mentions it in Outliers.
LANGUAGE: English
ANSWER: For unto everyone that hath shall be given, and he shall have abundance. But from him that hath not shall taken away even that which he hath.
> Matthew 25:29`,
};
export default example;

View File

@@ -0,0 +1,23 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): finishing a JavaScript debounce function
// body inside a fenced code block. Prompt data — strings must stay verbatim.
const messages: FewShotExample = {
context: Context.CodeBlock,
input: `# debounce
A debounce function makes sure that a function is only triggered once per user input. This is useful for event based triggers. You can implement in javascript like this:
\`\`\`javascript
function debounce(func, timeout = 300){
<mask/>
}
\`\`\`
`,
answer: `THOUGHT: This should include debounce logic, clearTimeout, setTimeout, prevent rapid calls, and function wrapper.
LANGUAGE: JavaScript
ANSWER:let timer;
return (...args) => {
clearTimeout(timer);
timer = setTimeout(() => { func.apply(this, args); }, timeout);
};`,
};
export default messages;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): filling in a Python function's parameter
// list. Prompt data — strings must stay verbatim.
const messages: FewShotExample = {
context: Context.CodeBlock,
input: `\`\`\`python
def fibonacci(<mask/>) -> int:
\tif n == 0 or n == 1:
\t\treturn n
\telse:
\t\treturn fibonacci(n-1) + fibonacci(n-2)
\`\`\`
`,
answer: `THOUGHT: This function finds the nth Fibonacci number. The 'n' arg of type int is missing. Based on the location of <mask/>, the answer must be function arguments.
LANGUAGE: Python
ANSWER: n: int`,
};
export default messages;

View File

@@ -0,0 +1,16 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): writing a heading title from the paragraph
// below it. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Heading,
input: `# <mask/>
A neuron is considered dead if it does not activate for any of the training instance in the training dataset. Because it never activates it will never have a gradient due to the chain rule so it also cannot change anymore. The dead ReLU problem can have due to a wide variety of reasons, such as:
1. Poorly initialized weights.
2. Extremely high learning rates during training.
`,
answer: `THOUGHT: The paragraph discusses the ReLU activation function, and dead neurons never activate. This problem is named the "Dead ReLU problem".
LANGUAGE: English
ANSWER: The dead ReLU problem`,
};
export default example;

View File

@@ -0,0 +1,13 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): filling the missing word of a partially
// written heading. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Heading,
input: `# The <mask/> function
The softmax function transforms a vector into a probability distribution such that the sum of the vector is equal to 1.`,
answer: `THOUGHT: The paragraph describes the softmax function and converts the vector to probability distributions; the title already contains "The" and "function". The answer must add the missing word to the title.
LANGUAGE: English
ANSWER: Softmax`,
};
export default example;

View File

@@ -0,0 +1,13 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): completing an inline LaTeX formula.
const messages: FewShotExample = {
context: Context.MathBlock,
input: String.raw`# Logarithm definition
A logarithm is the power to which a base must be raised to yield a given number. For example, $2^3 =8$; therefore, $3$ is the logarithm of $8$ to base $2$, or in other words $ <mask/>$`,
// Fixed: the answer was missing the "THOUGHT:" prefix that every sibling example
// uses in the THOUGHT/LANGUAGE/ANSWER response format.
answer: String.raw`THOUGHT: The text close to the <mask/> is about the definition of the log and logarithm of 8 to base 2. The answer is an inline formula for base 2 of 8 equals 3.
LANGUAGE: LaTeX, English
ANSWER: 3 = \log_2(8)`,
};
export default messages;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): completing a display ($$ … $$) math block.
// String.raw keeps the LaTeX backslashes literal. Prompt data — keep verbatim.
const messages: FewShotExample = {
context: Context.MathBlock,
input: String.raw`# Sample mean
The sample mean, or sometime called average, is defined as:
$$
sample\_mean(x) = <mask/>
$$
The average value has the property that 50% of the weighted* value will be above and below it. This weighted property can make it more sensitive to outliers than the median.
`,
answer: String.raw`THOUGHT: The text is about sample mean; the math block needs LaTeX for the sum of observations divided by the number of observations.
LANGUAGE: LaTeX, English
ANSWER: \frac{1}{n} \sum_i^n x_i`,
};
export default messages;

View File

@@ -0,0 +1,20 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): filling in the missing step of a numbered
// list. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.NumberedList,
input: `# Binary search
Binary is a sorting O(log(n)) searching algorithm. It works as follows:
1. Ensure you have a sorted array.
2. Check the middle element in the list:
3. Return the index if this is the item you are looking for.
4. <mask/>
5. Go to step 2 with the remaining left half if the item is larger than the target.
6. If there are no more elements to check, the return indicates that the item is not in the list.
`,
answer: `THOUGHT: The list contains steps of the binary search algorithm. It is missing the decision to split right if item < target.
LANGUAGE: English
ANSWER: Go to step 2 with the remaining right half if the item is smaller than the target.`,
};
export default example;

View File

@@ -0,0 +1,23 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): completing a sub-task in a task list.
// Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.TaskList,
input: `# Prepare for conference
Before going to a conference, there are a few things to do:
- [ ] Finish presentation
- [ ] Write outline
- [ ] Create slides
- [ ] Practice presentation
- [ ] Book flights
- [ ] Reserve hotel
- [ ] Pack suitcase
- [ ] <mask/>
- [ ] Arrange transportation to airport
`,
answer: `THOUGHT: The answer must be a subtask of 'Pack suitcase'; typical subtasks: 'Clothes,' 'Toiletries,' 'Travel documents'; ' - [ ] ' already there.
LANGUAGE: English
ANSWER: Clothes`,
};
export default example;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): filling in the missing item of a task
// list. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.TaskList,
input: ` # Write blog post about Obsidian
For my Obsidian blog post, I need to do the following:
- [ ] Research about Obsidian.
- [ ] Create an outline for the blog.
- [ ] Gather relevant visual aids.
- [ ] <mask/>
- [ ] Review and edit the first draft
`,
answer: `THOUGHT: The <mask/> is in the middle of a task sequence between gathering visuals before editing 1st draft. The 1st draft is missing and fits the sequence.
LANGUAGE: English
ANSWER: Write the first draft`,
};
export default example;

View File

@@ -0,0 +1,20 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): continuing a paragraph at the end of a
// sentence. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Text,
input: `# Locality-sensitive hashing (LSH)
Locality-sensitive hashing (LSH) is an algorithm that hashes similar items into the same buckets with high probability.
## Potential problems
### Collision (AND)
This happens when distant points are hashed into the same bucket. <mask/>
### Split (OR)
Nearby points are hashed into different buckets. This problem can be solved by using multiple hash tables instead of one. Points are candidates neighbors if they are a candidate in any of the hash tables. As a result the false negative rate reduces significantly, while the false positives rate only increase slightly.`,
answer: `THOUGHT: The answer must be the next sentence. It must explain the strategy to mitigate a collision problem, such as having multiple projections/hashes per table.
LANGUAGE: English
ANSWER: This problem can be solved by having multiple projections/hashes per table, where points are candidates' neighbors if they occur in all query bins.`,
};
export default example;

View File

@@ -0,0 +1,14 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example (v1 prompt format): completing the middle of a sentence
// inside a paragraph. Prompt data — strings must stay verbatim.
const example: FewShotExample = {
context: Context.Text,
input: `# Digitizing sound waves
Typical sound waves are complex and consist of multiple waves, each with amplitude, frequency, and phase.
When we digitize a sound, we sample the amplitude, the difference compared to the base level, at fixed intervals. This gives a sequence of digital values that can be used to approximate the original sound wave by recreating the pressure changes over time. The <mask/> is known as the sampling rate. The sampling rate must be chosen correctly, or we cannot represent specific frequencies or introduce unintended distortions.`,
answer: `THOUGHT: The text is about digitizing sound waves and the needed properties. The <mask/> is an incomplete sentence starting with 'The' and ending with 'is known as the sampling rate.', I should avoid overlap with this. The answer is a description for the property known as the 'sampling rate'.
LANGUAGE: English
ANSWER: chosen interval`,
};
export default example;

View File

@@ -0,0 +1,24 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
const example: FewShotExample = {
context: Context.UnorderedList,
input: `# Relu activation function
The ReLU activation function is a relatively simple non-linear function:
$$
ReLU(x) = max(0, x)
$$
Advantages:
- <mask/>
Disadvantages:
- Dead ReLU problem, whereby specific activation will only output zeros and thus will not have any gradients. This can be computationally wasteful since we still need matrix multiplication.
- Range $[0, \\infty]$ so Exploding Gradients can still be a problem.
`,
answer: `THOUGHT: Answer must be advantage of ReLU: simple, efficient, sparsity, addresses vanishing gradient to some extent, popular in practice. The "- " is already there.
LANGUAGE: English
ANSWER: Computational cheap activations and gradients.
- Vanishing gradient problem is rare, assuming correct weight initialization.`,
};
export default example;

View File

@@ -0,0 +1,19 @@
import Context from "../../../../core/autocomplete/context-detection";
import { FewShotExample } from "../../index";
// Few-shot example for the unordered-list context: the model must restore two
// missing items (O and L) of the SOLID list in the same "X: Name: description"
// format as the surrounding bullets.
// NOTE: input and answer are prompt data sent to the model — do not edit casually.
const example: FewShotExample = {
  context: Context.UnorderedList,
  input: `# SOLID
SOLID is a set of design principles from Robert C. Martin. It consists of the following sub-principles:
- S: Single Responsibility Principle: A class should have only one reason to change, meaning it should only have one job or responsibility.
- <mask/>
- I: Interface Segregation Principle: Clients should not be forced to depend on interfaces they do not use.
- D: Dependency Inversion Principle: High-level modules should not depend on low-level modules, but both should depend on abstractions.
`,
  answer: `THOUGHT: This is a list of SOLID principles. The "Open-Closed Principle" and "Liskov Substitution Principle" are missing. I must add them in a consistent format.
LANGUAGE: English
ANSWER: O: Open-Closed Principle: Software entities should be open for extension, but closed for modification.
- L: Liskov Substitution Principle: Subtypes must be substitutable for their base types without altering the correctness of the program.`,
};
export default example;

View File

@@ -0,0 +1,201 @@
import { z } from "zod";
import { isRegexValid, isValidIgnorePattern } from "../../../utils/auto-complete";
import {
azureOAIApiSettingsSchema, fewShotExampleSchema,
MAX_DELAY,
MAX_MAX_CHAR_LIMIT,
MIN_DELAY,
MIN_MAX_CHAR_LIMIT,
modelOptionsSchema,
ollamaApiSettingsSchema,
openAIApiSettingsSchema,
} from "../shared";
import block_qoute_example from "./few-shot-examples/block-qoute-example";
import codeblock_function_completion from "./few-shot-examples/codeblock-function-completion";
import codeblock_function_parameters from "./few-shot-examples/codeblock-function-parameters";
import header_example from "./few-shot-examples/header-example";
import header_example_relu from "./few-shot-examples/header-example-relu";
import math_block_inline from "./few-shot-examples/math-block-inline";
import math_block_multi_line from "./few-shot-examples/math-block-multiline";
import numbered_list_example from "./few-shot-examples/numbered-list-example";
import sub_task_list_example from "./few-shot-examples/subtask-list-example";
import task_list_example from "./few-shot-examples/task-list-example";
import text_completion_end from "./few-shot-examples/text-completion-end";
import text_completion_middle from "./few-shot-examples/text-completion-middle";
import unordered_list_pro_and_con_list from "./few-shot-examples/unordered-list-pro-and-con-list";
import unordered_list_solid from "./few-shot-examples/unordered-list-solid";
/**
 * Schema for a single completion trigger.
 *
 * A trigger is either a literal string suffix or a regular expression.
 * Regex triggers get two extra checks: the pattern must be anchored with a
 * trailing `$` (it is matched against the text before the cursor) and must
 * compile to a valid RegExp.
 */
export const triggerSchema = z.object({
  type: z.enum(['string', 'regex']),
  value: z.string().min(1, { message: "Trigger value must be at least 1 character long" })
}).strict().superRefine((trigger, ctx) => {
  // Literal string triggers need no further validation.
  if (trigger.type !== "regex") {
    return;
  }
  if (!trigger.value.endsWith("$")) {
    ctx.addIssue({
      path: ["value"],
      code: z.ZodIssueCode.custom,
      message: "Regex triggers must end with a $.",
    });
  }
  if (!isRegexValid(trigger.value)) {
    ctx.addIssue({
      path: ["value"],
      code: z.ZodIssueCode.custom,
      message: `Invalid regex: "${trigger.value}"`,
    });
  }
});
// Validated shape of the autocomplete settings (schema version "1").
// `.strict()` rejects unknown keys so stale or renamed settings fail loudly.
export const settingsSchema = z.object({
  version: z.literal("1"),
  enabled: z.boolean(),
  advancedMode: z.boolean(),
  apiProvider: z.enum(['azure', 'openai', "ollama"]),
  azureOAIApiSettings: azureOAIApiSettingsSchema,
  openAIApiSettings: openAIApiSettingsSchema,
  ollamaApiSettings: ollamaApiSettingsSchema,
  triggers: z.array(triggerSchema),
  // Debounce (ms) before a completion request is sent.
  delay: z.number().int().min(MIN_DELAY, { message: "Delay must be between 0ms and 2000ms" }).max(MAX_DELAY, { message: "Delay must be between 0ms and 2000ms" }),
  modelOptions: modelOptionsSchema,
  systemMessage: z.string().min(3, { message: "System message must be at least 3 characters long" }),
  fewShotExamples: z.array(fewShotExampleSchema),
  userMessageTemplate: z.string().min(3, { message: "User message template must be at least 3 characters long" }),
  chainOfThoughRemovalRegex: z.string().refine((regex) => isRegexValid(regex), { message: "Invalid regex" }),
  dontIncludeDataviews: z.boolean(),
  maxPrefixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
  // Bug fix: these two messages previously said "prefix" (copy-paste from the
  // line above), producing misleading validation errors for the suffix limit.
  maxSuffixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max suffix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max suffix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
  removeDuplicateMathBlockIndicator: z.boolean(),
  removeDuplicateCodeBlockIndicator: z.boolean(),
  // Newline-separated ignore patterns; every non-blank line must be a valid pattern.
  ignoredFilePatterns: z.string().refine((value) => value
    .split("\n")
    .filter(s => s.trim().length > 0)
    .filter(s => !isValidIgnorePattern(s)).length === 0,
    { message: "Invalid ignore pattern" }
  ),
  // One tag per line: no spaces, no leading '#', no comma-separated lists.
  ignoredTags: z.string().refine((value) => value
    .split("\n")
    .filter(s => s.includes(" ")).length === 0, { message: "Tags cannot contain spaces" }
  ).refine((value) => value
    .split("\n")
    .filter(s => s.includes("#")).length === 0, { message: "Enter tags without the # symbol" }
  ).refine((value) => value
    .split("\n")
    .filter(s => s.includes(",")).length === 0, { message: "Enter each tag on a new line without commas" }
  ),
  cacheSuggestions: z.boolean(),
  debugMode: z.boolean(),
}).strict();
// Top-level persisted plugin data; currently wraps only the validated settings.
export const pluginDataSchema = z.object({
  settings: settingsSchema,
}).strict();
// Default values for the autocomplete feature.
// NOTE(review): this object uses `autocompleteEnabled` and leaves `version`
// commented out, while `settingsSchema` above expects `enabled` and requires
// `version` — confirm how the caller maps these defaults before validating
// them against the schema directly.
export const DEFAULT_AUTOCOMPLETE_SETTINGS = {
  // version: "1",
  // General settings
  autocompleteEnabled: true,
  advancedMode: false,
  apiProvider: "openai",
  // API settings
  azureOAIApiSettings: {
    key: "",
    // Placeholder URL; the user substitutes their service and deployment names.
    url: "https://YOUR_AOI_SERVICE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions",
  },
  openAIApiSettings: {
    key: "",
    url: "https://api.openai.com/v1/chat/completions",
    model: "gpt-3.5-turbo",
  },
  ollamaApiSettings: {
    url: "http://localhost:11434/api/chat",
    model: "",
  },
  // Trigger settings: a completion fires when the text before the cursor ends
  // with one of these strings or matches one of these ($-anchored) regexes.
  triggers: [
    { type: "string", value: "# " },
    { type: "string", value: ". " },
    { type: "string", value: ": " },
    { type: "string", value: ", " },
    { type: "string", value: "! " },
    { type: "string", value: "? " },
    { type: "string", value: "`" },
    { type: "string", value: "' " },
    { type: "string", value: "= " },
    { type: "string", value: "$ " },
    { type: "string", value: "> " },
    { type: "string", value: "\n" },
    // bullet list
    { type: "regex", value: "[\\t ]*(\\-|\\*)[\\t ]+$" },
    // numbered list
    { type: "regex", value: "[\\t ]*[0-9A-Za-z]+\\.[\\t ]+$" },
    // new line with spaces
    { type: "regex", value: "\\$\\$\\n[\\t ]*$" },
    // markdown multiline code block
    { type: "regex", value: "```[a-zA-Z0-9]*(\\n\\s*)?$" },
    // task list normal, sub or numbered.
    { type: "regex", value: "\\s*(-|[0-9]+\\.) \\[.\\]\\s+$" },
  ],
  // Debounce in milliseconds before a completion request is sent.
  delay: 500,
  // Request settings (forwarded to the chat-completions API)
  modelOptions: {
    temperature: 1,
    top_p: 0.1,
    frequency_penalty: 0.25,
    presence_penalty: 0,
    max_tokens: 800,
  },
  // Prompt settings
  systemMessage: `Your job is to predict the most logical text that should be written at the location of the <mask/>.
Your answer can be either code, a single word, or multiple sentences.
If the <mask/> is in the middle of a partial sentence, your answer should only be the 1 or 2 words fixes the sentence and not the entire sentence.
You are not allowed to have any overlapping text directly surrounding the <mask/>.
Your answer must be in the same language as the text directly surrounding the <mask/>.
Your response must have the following format:
THOUGHT: here, you reason about the answer; use the 80/20 principle to be brief.
LANGUAGE: here, you write the language of your answer, e.g. English, Python, Dutch, etc.
ANSWER: here, you write the text that should be at the location of <mask/>
`,
  fewShotExamples: [
    block_qoute_example,
    codeblock_function_completion,
    codeblock_function_parameters,
    header_example,
    numbered_list_example,
    sub_task_list_example,
    task_list_example,
    text_completion_end,
    text_completion_middle,
    unordered_list_pro_and_con_list,
    unordered_list_solid,
    math_block_inline,
    math_block_multi_line,
    header_example_relu,
    // NOTE(review): each example's toString() yields "[object Object]", so
    // localeCompare always returns 0 and this (stable) sort is a no-op —
    // declaration order is preserved. Intent of the sort is unclear; confirm.
  ].sort((a, b) => a.toString().localeCompare(b.toString())),
  userMessageTemplate: "{{prefix}}<mask/>{{suffix}}",
  // Strips the model's chain-of-thought up to and including the "ANSWER:" marker.
  chainOfThoughRemovalRegex: `(.|\\n)*ANSWER:`,
  // Preprocessing settings
  dontIncludeDataviews: true,
  maxPrefixCharLimit: 4000,
  maxSuffixCharLimit: 4000,
  // Postprocessing settings
  removeDuplicateMathBlockIndicator: true,
  removeDuplicateCodeBlockIndicator: true,
  ignoredFilePatterns: "**/secret/**\n",
  ignoredTags: "",
  cacheSuggestions: true,
  debugMode: false,
};
// A single completion trigger (literal string or $-anchored regex).
export type Trigger = z.infer<typeof triggerSchema>;
// Input shape accepted by settingsSchema (pre-parse form).
export type Settings = z.input<typeof settingsSchema>;
// Shape of the persisted plugin data after validation.
export type PluginData = z.infer<typeof pluginDataSchema>;