Skip to content

Commit

Permalink
docs: update
Browse files Browse the repository at this point in the history
  • Loading branch information
0xcadams committed Nov 8, 2023
1 parent 2d1cb18 commit 81fc8d2
Show file tree
Hide file tree
Showing 11 changed files with 147 additions and 147 deletions.
2 changes: 1 addition & 1 deletion docs/frameworks/next-13.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ const openaiClient = new OpenAI({ apiKey: "OPENAI_API_KEY" });
// Pass the OpenAI client into Hopfield
const hopfield = hop.client(openai).provider(openaiClient);
// Create a streaming chat provider
const chat = hopfield.chat("gpt-3.5-turbo-16k-0613").streaming();
const chat = hopfield.chat("gpt-3.5-turbo-1106").streaming();

export type ChatResponseProps = {
prompt: string;
Expand Down
2 changes: 1 addition & 1 deletion examples/bun/index.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ const openaiClient = new OpenAI({

const hopfield = hop.client(openai).provider(openaiClient);

const chat = hopfield.chat('gpt-3.5-turbo-16k-0613');
const chat = hopfield.chat('gpt-3.5-turbo-1106');

test('test non-streaming chat', async () => {
const result = await chat.get({
Expand Down
2 changes: 1 addition & 1 deletion examples/cloudflare-worker/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ export default {

const hopfield = hop.client(openai).provider(openaiClient);

const chat = hopfield.chat('gpt-3.5-turbo-16k-0613').streaming();
const chat = hopfield.chat('gpt-3.5-turbo-1106').streaming();

const result = await chat.get({
temperature: 0,
Expand Down
2 changes: 1 addition & 1 deletion examples/next-13/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ const openaiClient = new OpenAI({
const hopfield = hop.client(openai).provider(openaiClient);

// Create the Hopfield streaming chat provider
const chat = hopfield.chat("gpt-3.5-turbo-16k-0613").streaming();
const chat = hopfield.chat("gpt-3.5-turbo-1106").streaming();
```

### Constructing Messages & Streaming
Expand Down
42 changes: 21 additions & 21 deletions examples/next-13/src/app/code-chat.tsx
Original file line number Diff line number Diff line change
@@ -1,22 +1,22 @@
import { Suspense } from 'react';
import { Suspense } from "react";

import hop from 'hopfield';
import openai from 'hopfield/openai';
import OpenAI from 'openai';
import { kv } from '@vercel/kv';
import { docs } from './docs';
import { hashString } from './hash';
import hop from "hopfield";
import openai from "hopfield/openai";
import OpenAI from "openai";
import { kv } from "@vercel/kv";
import { docs } from "./docs";
import { hashString } from "./hash";

// Create an OpenAI API client
const openaiClient = new OpenAI({
apiKey: process.env.OPENAI_API_KEY || '',
apiKey: process.env.OPENAI_API_KEY || "",
});

// Instantiate a new Hopfield client with the OpenAI API client
const hopfield = hop.client(openai).provider(openaiClient);

// Create the Hopfield streaming chat provider
const chat = hopfield.chat('gpt-3.5-turbo-16k-0613').streaming();
const chat = hopfield.chat("gpt-3.5-turbo-1106").streaming();

const prompt = `Provide a cool use of Hopfield from the context below, with a short paragraph introduction of what Hopfield does, and then a Typescript example in 20 lines of code or less: \n\n${docs}`;

Expand All @@ -32,12 +32,12 @@ export async function CodeChat() {
// construct messages with hop.inferMessageInput
const messages: hop.inferMessageInput<typeof chat>[] = [
{
role: 'system',
role: "system",
content:
'You are a developer evangelist for the Hopfield Typescript npm package. You ALWAYS respond using Markdown. The docs for Hopfield are located at https://hopfield.ai.',
"You are a developer evangelist for the Hopfield Typescript npm package. You ALWAYS respond using Markdown. The docs for Hopfield are located at https://hopfield.ai.",
},
{
role: 'user',
role: "user",
content: prompt,
},
];
Expand All @@ -52,11 +52,11 @@ export async function CodeChat() {
// we map to a string to store in Redis, to save on costs :sweat:
const storedResponse = chunks
.map((chunk) =>
chunk.choices[0].__type === 'content'
chunk.choices[0].__type === "content"
? chunk.choices[0].delta.content
: '',
: ""
)
.join('');
.join("");

await kv.set(promptHash, storedResponse);
// expire every ten minutes
Expand Down Expand Up @@ -105,7 +105,7 @@ async function RecursiveTokens({ reader }: RecursiveTokensProps) {

return (
<>
{value.choices[0].__type === 'content' ? (
{value.choices[0].__type === "content" ? (
value.choices[0].delta.content
) : (
<></>
Expand All @@ -125,18 +125,18 @@ const getCachedResponse = async (prompt: string) => {
const cached = (await kv.get(prompt)) as string | undefined;

if (cached) {
const chunks = cached.split(' ');
const chunks = cached.split(" ");
const stream = new ReadableStream<hop.inferResult<typeof chat>>({
async start(controller) {
let id = 0;
for (const chunk of chunks) {
const fakeChunk: hop.inferResult<typeof chat> = {
model: 'gpt-3.5-turbo-16k-0613',
model: "gpt-3.5-turbo-1106",
id: String(id++),
created: Date.now(),
choices: [
{
__type: 'content',
__type: "content",
delta: {
content: `${chunk} `,
},
Expand All @@ -150,8 +150,8 @@ const getCachedResponse = async (prompt: string) => {
setTimeout(
r,
// get a random number between 10ms and 50ms to simulate a random delay
Math.floor(Math.random() * 40) + 10,
),
Math.floor(Math.random() * 40) + 10
)
);
}
controller.close();
Expand Down
14 changes: 7 additions & 7 deletions src/_test/openai-non-streaming.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ export const openaiBasicMessage = {
id: 'chatcmpl-8976324',
object: 'chat.completion',
created: 1690495858,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand All @@ -24,7 +24,7 @@ export const openaiBasicFunctionCall = {
id: 'chatcmpl-5544332211',
object: 'chat.completion',
created: 1690825708,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand All @@ -50,7 +50,7 @@ export const openaiLengthLimited = {
id: 'chatcmpl-1230789',
object: 'chat.completion',
created: 1690495920,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand All @@ -72,7 +72,7 @@ export const openaiFunctionCallLengthLimited = {
id: 'chatcmpl-908213',
object: 'chat.completion',
created: 1690496036,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand All @@ -98,7 +98,7 @@ export const openaiFunctionCall = {
id: 'chatcmpl-098234',
object: 'chat.completion',
created: 1690496097,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand All @@ -124,7 +124,7 @@ export const openaiTwoResponses = {
id: 'chatcmpl-23490823',
object: 'chat.completion',
created: 1690496163,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
choices: [
{
index: 0,
Expand Down Expand Up @@ -165,7 +165,7 @@ export const openaiAdversarial = {
},
],
created: 1690611868,
model: 'gpt-3.5-turbo-0613',
model: 'gpt-3.5-turbo-1106',
object: 'chat.completion',
usage: {
completion_tokens: 361,
Expand Down

1 comment on commit 81fc8d2

@vercel
Copy link

@vercel vercel bot commented on 81fc8d2 Nov 8, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.