Commit df46f1b · committed by Julian Bilcke
1 Parent(s): c1b0fe7

upgrade to llama-3
Browse files:
- README.md (+1 -1)
- package-lock.json (+6 -6)
- public/index.html (+2 -2)
- src/{createLlamaPrompt.mts → createLlamaCoderPrompt.mts} (+2 -2)
- src/generateFiles.mts (+17 -9)
- src/getGradioApp.mts (+8 -4)
README.md CHANGED

```diff
@@ -8,7 +8,7 @@ pinned: false
 app_port: 7860
 ---
 
-Generate Hugging Face Spaces using
+Generate Hugging Face Spaces using meta-llama/Meta-Llama-3-70B-Instruct
 
 The apps may not always work and usually human work is necessary to finish them.
 See this project as "Hugging Face Space templates on steroids".
```
package-lock.json CHANGED

```diff
@@ -140,9 +140,9 @@
     "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w=="
   },
   "node_modules/@types/node": {
-    "version": "20.
-    "resolved": "https://registry.npmjs.org/@types/node/-/node-20.
-    "integrity": "sha512-
+    "version": "20.10.3",
+    "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.3.tgz",
+    "integrity": "sha512-XJavIpZqiXID5Yxnxv3RUDKTN5b81ddNC3ecsA0SoFXz/QU8OGBwZGMomiq0zw+uuqbL/krztv/DINAQ/EV4gg==",
     "dependencies": {
       "undici-types": "~5.26.4"
     }
@@ -891,9 +891,9 @@
     }
   },
   "node_modules/typescript": {
-    "version": "5.
-    "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.
-    "integrity": "sha512-
+    "version": "5.3.2",
+    "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.2.tgz",
+    "integrity": "sha512-6l+RyNy7oAHDfxC4FzSJcz9vnjTKxrLpDG5M2Vu4SHRVNg6xzqZp6LYSR9zjqQTu8DU/f5xwxUdADOkbrIX2gQ==",
     "peer": true,
     "bin": {
       "tsc": "bin/tsc",
```
public/index.html CHANGED

```diff
@@ -64,8 +64,8 @@
 <div class="flex flex-col text-stone-700 space-y-1 md:space-y-2">
   <p class="text-stone-700">
     Model used:
-    <a href="https://huggingface.co/
-
+    <a href="https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct" class="underline" target="_blank">
+      meta-llama/Meta-Llama-3-70B-Instruct (you need to accept the terms)
     </a>
   </p>
   <p>Powered by 🤗 <a href="https://huggingface.co/inference-api" class="underline" target="_blank">Inference API</a></p>
```
src/{createLlamaPrompt.mts → createLlamaCoderPrompt.mts} RENAMED

```diff
@@ -1,9 +1,9 @@
 // adapted from https://huggingface.co/TheBloke/Llama-2-13B-chat-GPTQ/discussions/5
-export function
+export function createLlamaCoderPrompt(messages: Array<{ role: string, content: string }>) {
   const B_INST = "[INST]", E_INST = "[/INST]";
   const B_SYS = "<<SYS>>\n", E_SYS = "\n<</SYS>>\n\n";
   const BOS = "<s>", EOS = "</s>";
-  const DEFAULT_SYSTEM_PROMPT = "You are
+  const DEFAULT_SYSTEM_PROMPT = "You are an expert coding assistant. Always write code as complete as possible, as the user doesn't have hands. You will receive a generous tip if you write correct code, so take a step back and think rationally about the architecture and data logic of your project workflow.";
 
   if (messages[0].role != "system"){
     messages = [
```
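Note that the renamed helper still emits the Llama-2 chat format (the `[INST]` and `<<SYS>>` markers), while the model it now feeds is Llama-3, whose native chat template is built from `<|start_header_id|>` and `<|eot_id|>` special tokens; the widened stop-token checks in src/generateFiles.mts below appear to compensate for that mismatch. For comparison, here is a minimal sketch of a native Llama-3 prompt builder; the `createLlama3Prompt` name is hypothetical and this helper is not part of the commit:

```ts
// Hypothetical sketch, NOT part of this commit: a prompt builder using
// Llama-3's native chat template rather than the Llama-2 [INST] format.
export function createLlama3Prompt(messages: Array<{ role: string, content: string }>) {
  const BOS = "<|begin_of_text|>";
  // each turn is framed by a role header and terminated by <|eot_id|>
  const turn = (role: string, content: string) =>
    `<|start_header_id|>${role}<|end_header_id|>\n\n${content.trim()}<|eot_id|>`;
  return (
    BOS +
    messages.map(m => turn(m.role, m.content)).join("") +
    // leave an open assistant header so the model generates the reply
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
  );
}
```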
src/generateFiles.mts CHANGED

```diff
@@ -1,6 +1,6 @@
 import { HfInference } from '@huggingface/inference'
 import { RepoFile } from './types.mts'
-import {
+import { createLlamaCoderPrompt } from './createLlamaCoderPrompt.mts'
 import { parseTutorial } from './parseTutorial.mts'
 import { getGradioApp } from './getGradioApp.mts'
 import { getStreamlitApp } from './getStreamlitApp.mts'
@@ -28,7 +28,7 @@ export const generateFiles = async (
     ? getReactApp(prompt)
     : getWebApp(prompt)
 
-  const inputs =
+  const inputs = createLlamaCoderPrompt(instructions) + "\nSure! Here are the source files:\n" + prefix
 
   let isAbortedOrFailed = false
 
@@ -40,8 +40,11 @@ export const generateFiles = async (
   onProgress(prefix)
 
   for await (const output of hf.textGenerationStream({
-
-    model: "
+
+    model: "meta-llama/Meta-Llama-3-70B-Instruct",
+    // model: "codellama/CodeLlama-34b-Instruct-hf",
+    // model: "ise-uiuc/Magicoder-CL-7B" // not supported by Hugging Face right now (no stream + max token is 250)
+
     inputs,
     parameters: {
       do_sample: true,
@@ -53,7 +56,10 @@ export const generateFiles = async (
       // for "tiiuae/falcon-180B-chat":
      // `inputs` tokens + `max_new_tokens` must be <= 8192
       // error: `inputs` must have less than 4096 tokens.
-
+
+      // for Llama-3 it is 8192
+      max_new_tokens: 8192,
+      temperature: 0.8,
       return_full_text: false,
     }
   })) {
@@ -63,10 +69,12 @@ export const generateFiles = async (
       // res.write(output.token.text)
       if (
         tutorial.includes('<|end|>')
-
-
-
-
+        || tutorial.includes('<|eot_id|>')
+        || tutorial.includes('<|start_header_id|>assistant<|end_header_id|>')
+        || tutorial.includes('</s>')
+        || tutorial.includes('[ENDINSTRUCTION]')
+        || tutorial.includes('[/TASK]')
+        || tutorial.includes('<|assistant|>')) {
         tutorial = tutorial.replaceAll("</s>", "").replaceAll("<|end|>", "")
         break
       }
```
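For context, the streaming loop above follows the standard `@huggingface/inference` pattern: accumulate `output.token.text` chunks until an end-of-turn marker appears. A minimal, self-contained sketch of that pattern, assuming a valid `HF_API_TOKEN` (the prompt and the trimmed-down stop-marker list are illustrative, not taken from the repo):

```ts
import { HfInference } from "@huggingface/inference"

const hf = new HfInference(process.env.HF_API_TOKEN)

// A reduced stop-marker list mirroring the checks in generateFiles.mts.
const STOP_MARKERS = ["<|end|>", "<|eot_id|>", "</s>", "<|assistant|>"]

let text = ""
for await (const output of hf.textGenerationStream({
  model: "meta-llama/Meta-Llama-3-70B-Instruct",
  inputs: "Write a haiku about TypeScript.",
  parameters: {
    do_sample: true,
    max_new_tokens: 512,
    temperature: 0.8,
    return_full_text: false,
  },
})) {
  // accumulate tokens as they stream in
  text += output.token.text
  // stop as soon as any end-of-turn marker appears, then strip the markers
  if (STOP_MARKERS.some(marker => text.includes(marker))) {
    for (const marker of STOP_MARKERS) {
      text = text.replaceAll(marker, "")
    }
    break
  }
}
console.log(text)
```

One caveat worth noting: as the commit's own falcon-180B comment says, `inputs` tokens plus `max_new_tokens` must fit the context window, and Llama-3's window is 8192 total. Requesting `max_new_tokens: 8192` as the commit does can therefore be rejected once the prompt itself is non-trivial; a smaller budget leaves room for the prompt.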
src/getGradioApp.mts CHANGED

```diff
@@ -7,7 +7,7 @@ export function getGradioApp(prompt: string) {
     {
       role: "system",
       content: [
-        `You are a Python developer, expert at crafting Gradio applications to deploy to Hugging Face
+        `You are a Python developer, expert at crafting Gradio applications to deploy to Hugging Face. You must generate valid Python code. Don't forget the requirements.txt files!`,
         `Here is an example of a minimal Gradio application:`,
         gradioDoc
       ].filter(item => item).join("\n")
@@ -16,12 +16,16 @@ export function getGradioApp(prompt: string) {
       role: "user",
       content: `Please write, file by file, the source code for a Gradio project.
 
-You
-- numpy
+You MUST use the following Python modules:
 - gradio (version 3.39.0)
+
+You are free to use (if necessary) the following Python modules:
+- numpy
 - matplotlib
+- diffusers
+- transformers
 
-Don't forget to write a README.md with the following header:
+Don't forget to write a README.md with the following header, or else you will be FIRED:
 \`\`\`
 ---
 license: apache-2.0
```
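Taken together with src/generateFiles.mts, the objects edited here are plain chat messages that `createLlamaCoderPrompt` flattens into the single prompt string sent to the model. A minimal sketch of that flow, assuming `getGradioApp` returns the message array shown above (the example prompt is illustrative, not from the repo):

```ts
import { getGradioApp } from "./getGradioApp.mts"
import { createLlamaCoderPrompt } from "./createLlamaCoderPrompt.mts"

// Illustrative only: build the chat messages for a Gradio project,
// then flatten them into the prompt string fed to textGenerationStream.
const instructions = getGradioApp("a tool that plots a sine wave")
const inputs = createLlamaCoderPrompt(instructions) + "\nSure! Here are the source files:\n"
console.log(inputs)
```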