
Commit

Merge pull request #297 from steven-tey/ai-features
feat: add ai features example
andrewdoro authored Mar 8, 2024
2 parents 357f7d0 + 122b3ee commit 06587d5
Showing 24 changed files with 693 additions and 179 deletions.
104 changes: 84 additions & 20 deletions apps/web/app/api/generate/route.ts
@@ -2,35 +2,34 @@ import OpenAI from "openai";
import { OpenAIStream, StreamingTextResponse } from "ai";
import { kv } from "@vercel/kv";
import { Ratelimit } from "@upstash/ratelimit";
import { match } from "ts-pattern";
import type { ChatCompletionMessageParam } from "openai/resources/index.mjs";

// Create an OpenAI API client (that's edge friendly!)
// Using Llama's OpenAI-compatible client:

// IMPORTANT! Set the runtime to edge: https://vercel.com/docs/functions/edge-functions/edge-runtime
export const runtime = "edge";

const isProd = process.env.NODE_ENV === "production";
const llama = new OpenAI({
apiKey: "ollama",
baseURL: "http://localhost:11434/v1",
});

export async function POST(req: Request): Promise<Response> {
const openai = new OpenAI({
...(!isProd && {
baseURL: "http://localhost:11434/v1",
}),
apiKey: isProd ? process.env.OPENAI_API_KEY : "ollama",
apiKey: process.env.OPENAI_API_KEY,
});
// Check if the OPENAI_API_KEY is set, if not return 400
if (
(!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === "") &&
isProd
) {
if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === "") {
return new Response(
"Missing OPENAI_API_KEY - make sure to add it to your .env file.",
{
status: 400,
},
);
}
if (isProd && process.env.KV_REST_API_URL && process.env.KV_REST_API_TOKEN) {
if (process.env.KV_REST_API_URL && process.env.KV_REST_API_TOKEN) {
const ip = req.headers.get("x-forwarded-for");
const ratelimit = new Ratelimit({
redis: kv,
@@ -53,26 +52,91 @@ export async function POST(req: Request): Promise<Response> {
}
}

let { prompt } = await req.json();

const response = await openai.chat.completions.create({
model: process.env.NODE_ENV == "development" ? "llama2" : "gpt-3.5-turbo",
stream: true,
messages: [
let { prompt, option, command } = await req.json();
const messages = match(option)
.with("continue", () => [
{
role: "system",
content:
"You are an AI writing assistant that continues existing text based on context from prior text. " +
"Give more weight/priority to the later characters than the beginning ones. " +
"Limit your response to no more than 200 characters, but make sure to construct complete sentences.",
// we're disabling markdown for now until we can figure out a way to stream markdown text with proper formatting: https://github.com/steven-tey/novel/discussions/7
// "Use Markdown formatting when appropriate.",
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: prompt,
},
],
])
.with("improve", () => [
{
role: "system",
content:
"You are an AI writing assistant that improves existing text. " +
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: `The existing text is: ${prompt}`,
},
])
.with("shorter", () => [
{
role: "system",
content:
"You are an AI writing assistant that shortens existing text. " +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: `The existing text is: ${prompt}`,
},
])
.with("longer", () => [
{
role: "system",
content:
"You are an AI writing assistant that lengthens existing text. " +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: `The existing text is: ${prompt}`,
},
])
.with("fix", () => [
{
role: "system",
content:
"You are an AI writing assistant that fixes grammar and spelling errors in existing text. " +
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: `The existing text is: ${prompt}`,
},
])
.with("zap", () => [
{
role: "system",
content:
"You area an AI writing assistant that generates text based on a prompt. " +
"You take an input from the user and a command for manipulating the text" +
"Use Markdown formatting when appropriate.",
},
{
role: "user",
content: `For this text: ${prompt}. You have to respect the command: ${command}`,
},
])
.run() as ChatCompletionMessageParam[];

const response = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
stream: true,
messages,
temperature: 0.7,
top_p: 1,
frequency_penalty: 0,
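For orientation (not part of the diff itself): the route above expects a JSON body containing prompt, option, and command, and streams plain text back. Below is a minimal client-side sketch built on the useCompletion hook from the ai package this route already uses; the hook options and helper names are assumptions for illustration, not code from this commit.

"use client";

import { useCompletion } from "ai/react";

// Hypothetical hook wrapping the /api/generate route added above (sketch only).
export function useGenerate() {
  const { complete, completion, isLoading } = useCompletion({
    api: "/api/generate",
    onError: (e) => console.error(e.message),
  });

  // Rewrite the selected text, e.g. improveSelection("teh quick brown fox")
  const improveSelection = (text: string) =>
    complete(text, { body: { option: "improve" } });

  // Free-form instruction, e.g. runCommand("Hello", "translate to German")
  const runCommand = (text: string, command: string) =>
    complete(text, { body: { option: "zap", command } });

  return { improveSelection, runCommand, completion, isLoading };
}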
4 changes: 2 additions & 2 deletions apps/web/app/page.tsx
@@ -1,8 +1,8 @@
import { Github } from "@/components/tailwind/ui/icons";
import { Button } from "@/components/tailwind/ui/button";
import Menu from "@/components/tailwind/ui/menu";
import TailwindEditor from "@/components/tailwind/editor";
import Link from "next/link";
import TailwindAdvancedEditor from "@/components/tailwind/advanced-editor";

export default function Page() {
return (
@@ -18,7 +18,7 @@ export default function Page() {
</Link>
<Menu />
</div>
<TailwindEditor />
<TailwindAdvancedEditor />
</div>
);
}
apps/web/components/tailwind/advanced-editor.tsx
@@ -3,14 +3,13 @@ import { defaultEditorContent } from "@/lib/content";
import React, { useEffect, useState } from "react";
import { useDebouncedCallback } from "use-debounce";
import {
EditorInstance,
EditorRoot,
EditorBubble,
EditorCommand,
EditorCommandItem,
EditorCommandEmpty,
EditorContent,
type JSONContent,
EditorInstance,
} from "novel";
import { ImageResizer, handleCommandNavigation } from "novel/extensions";
import { defaultExtensions } from "./extensions";
@@ -21,12 +20,13 @@ import { ColorSelector } from "./selectors/color-selector";

import { TextButtons } from "./selectors/text-buttons";
import { slashCommand, suggestionItems } from "./slash-command";
import GenerativeMenuSwitch from "./generative/generative-menu-switch";
import { handleImageDrop, handleImagePaste } from "novel/plugins";
import { uploadFn } from "./image-upload";

const extensions = [...defaultExtensions, slashCommand];

const TailwindEditor = () => {
const TailwindAdvancedEditor = () => {
const [initialContent, setInitialContent] = useState<null | JSONContent>(
null,
);
@@ -35,6 +35,7 @@ const TailwindEditor = () => {
const [openNode, setOpenNode] = useState(false);
const [openColor, setOpenColor] = useState(false);
const [openLink, setOpenLink] = useState(false);
const [openAI, setOpenAI] = useState(false);

const debouncedUpdates = useDebouncedCallback(
async (editor: EditorInstance) => {
@@ -82,7 +83,7 @@ const TailwindEditor = () => {
}}
slotAfter={<ImageResizer />}
>
<EditorCommand className="z-50 h-auto max-h-[330px] w-72 overflow-y-auto rounded-md border border-muted bg-background px-1 py-2 shadow-md transition-all">
<EditorCommand className="z-50 h-auto max-h-[330px] overflow-y-auto rounded-md border border-muted bg-background px-1 py-2 shadow-md transition-all">
<EditorCommandEmpty className="px-2 text-muted-foreground">
No results
</EditorCommandEmpty>
@@ -106,12 +107,7 @@ const TailwindEditor = () => {
))}
</EditorCommand>

<EditorBubble
tippyOptions={{
placement: "top",
}}
className="flex w-fit max-w-[90vw] overflow-hidden rounded border border-muted bg-background shadow-xl"
>
<GenerativeMenuSwitch open={openAI} onOpenChange={setOpenAI}>
<Separator orientation="vertical" />
<NodeSelector open={openNode} onOpenChange={setOpenNode} />
<Separator orientation="vertical" />
@@ -121,11 +117,11 @@
<TextButtons />
<Separator orientation="vertical" />
<ColorSelector open={openColor} onOpenChange={setOpenColor} />
</EditorBubble>
</GenerativeMenuSwitch>
</EditorContent>
</EditorRoot>
</div>
);
};

export default TailwindEditor;
export default TailwindAdvancedEditor;
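The GenerativeMenuSwitch component used above is among the 24 changed files but is not shown in this excerpt. Based only on how it is consumed here (it replaces the old EditorBubble wrapper and receives open / onOpenChange), a rough sketch of the shape it might take follows; the internals are assumptions, not the commit's actual file.

import { EditorBubble } from "novel";
import { Fragment, type ReactNode } from "react";

interface GenerativeMenuSwitchProps {
  children: ReactNode;
  open: boolean;
  onOpenChange: (open: boolean) => void;
}

// Sketch: wrap EditorBubble and swap between an AI panel and the regular selectors.
const GenerativeMenuSwitchSketch = ({
  children,
  open,
  onOpenChange,
}: GenerativeMenuSwitchProps) => (
  <EditorBubble
    tippyOptions={{ placement: open ? "bottom-start" : "top" }}
    className="flex w-fit max-w-[90vw] overflow-hidden rounded-md border border-muted bg-background shadow-xl"
  >
    {open ? (
      // An AI selector panel (prompt input plus AICompletionCommands) would render here.
      <Fragment />
    ) : (
      <Fragment>
        {/* An "Ask AI" trigger button could call onOpenChange(true) here. */}
        {children}
      </Fragment>
    )}
  </EditorBubble>
);

export default GenerativeMenuSwitchSketch;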
4 changes: 3 additions & 1 deletion apps/web/components/tailwind/extensions.ts
Original file line number Diff line number Diff line change
@@ -7,13 +7,14 @@ import {
HorizontalRule,
StarterKit,
Placeholder,
AIHighlight,
} from "novel/extensions";
import { UploadImagesPlugin } from "novel/plugins";

import { cx } from "class-variance-authority";

//TODO I am using cx here to get tailwind autocomplete working, idk if someone else can write a regex to just capture the class key in objects

const aiHighlight = AIHighlight;
//You can overwrite the placeholder with your own configuration
const placeholder = Placeholder;
const tiptapLink = TiptapLink.configure({
@@ -114,4 +115,5 @@ export const defaultExtensions = [
taskList,
taskItem,
horizontalRule,
aiHighlight,
];
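Like the other extensions in this file, the AIHighlight mark registered above can presumably be configured before being added to defaultExtensions, assuming it follows the usual Tiptap configure() pattern. A hedged alternative to the plain assignment above; the class names are illustrative and not taken from this commit.

// Sketch (same file): style the AI highlight so text being rewritten is visibly marked.
const aiHighlight = AIHighlight.configure({
  HTMLAttributes: {
    class: "ai-highlight bg-purple-100 dark:bg-purple-900/40",
  },
});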
67 changes: 67 additions & 0 deletions apps/web/components/tailwind/generative/ai-completion-command.tsx
@@ -0,0 +1,67 @@
import React from "react";
import { CommandGroup, CommandItem, CommandSeparator } from "../ui/command";
import { useEditor } from "novel";
import { Check, TextQuote, TrashIcon } from "lucide-react";

const AICompletionCommands = ({
completion,
onDiscard,
}: {
completion: string;
onDiscard: () => void;
}) => {
const { editor } = useEditor();
return (
<>
<CommandGroup>
<CommandItem
className="gap-2 px-4"
value="replace"
onSelect={() => {
const selection = editor.view.state.selection;

editor
.chain()
.focus()
.insertContentAt(
{
from: selection.from,
to: selection.to,
},
completion,
)
.run();
}}
>
<Check className="h-4 w-4 text-muted-foreground" />
Replace selection
</CommandItem>
<CommandItem
className="gap-2 px-4"
value="insert"
onSelect={() => {
const selection = editor.view.state.selection;
editor
.chain()
.focus()
.insertContentAt(selection.to + 1, completion)
.run();
}}
>
<TextQuote className="h-4 w-4 text-muted-foreground" />
Insert below
</CommandItem>
</CommandGroup>
<CommandSeparator />

<CommandGroup>
<CommandItem onSelect={onDiscard} value="thrash" className="gap-2 px-4">
<TrashIcon className="h-4 w-4 text-muted-foreground" />
Discard
</CommandItem>
</CommandGroup>
</>
);
};

export default AICompletionCommands;
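A usage sketch (not part of this commit): AICompletionCommands is presumably rendered inside the AI selector's cmdk menu once a completion has streamed in. Everything below other than AICompletionCommands itself is an assumption for illustration.

import { Command, CommandList } from "../ui/command";
import AICompletionCommands from "./ai-completion-command";

// Sketch: show the replace / insert / discard actions once `completion` is non-empty.
const ExampleAIMenu = ({
  completion,
  onClose,
}: {
  completion: string;
  onClose: () => void;
}) => (
  <Command>
    <CommandList>
      {completion.length > 0 && (
        <AICompletionCommands completion={completion} onDiscard={onClose} />
      )}
    </CommandList>
  </Command>
);

export default ExampleAIMenu;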