Skip to content

Commit 19e2039

Browse files
committed
Standardize HF_ACCESS_TOKEN -> HF_TOKEN
1 parent 730fe4a commit 19e2039

File tree

21 files changed

+41
-41
lines changed

.github/workflows/lint-and-test.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -50,12 +50,12 @@ jobs:
5050
- name: Test
5151
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test
5252
env:
53-
HF_ACCESS_TOKEN: ${{ secrets.HF_ACCESS_TOKEN }}
53+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
5454

5555
- name: Test in browser
5656
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test:browser
5757
env:
58-
HF_ACCESS_TOKEN: ${{ secrets.HF_ACCESS_TOKEN }}
58+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
5959

6060
- name: E2E - start mock npm registry
6161
run: |
@@ -86,7 +86,7 @@ jobs:
8686
pnpm i --ignore-workspace --registry http://localhost:4874/
8787
pnpm start
8888
env:
89-
token: ${{ secrets.HF_ACCESS_TOKEN }}
89+
token: ${{ secrets.HF_TOKEN }}
9090

9191
- name: E2E test - svelte app build
9292
working-directory: e2e/svelte

README.md

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -105,9 +105,9 @@ Get your HF access token in your [account settings](https://huggingface.co/setti
105105
```ts
106106
import { HfInference } from "@huggingface/inference";
107107

108-
const HF_ACCESS_TOKEN = "hf_...";
108+
const HF_TOKEN = "hf_...";
109109

110-
const inference = new HfInference(HF_ACCESS_TOKEN);
110+
const inference = new HfInference(HF_TOKEN);
111111

112112
// You can also omit "model" to use the recommended model for the task
113113
await inference.translation({
@@ -137,11 +137,11 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the
137137
```ts
138138
import {HfAgent, LLMFromHub, defaultTools} from '@huggingface/agents';
139139

140-
const HF_ACCESS_TOKEN = "hf_...";
140+
const HF_TOKEN = "hf_...";
141141

142142
const agent = new HfAgent(
143-
HF_ACCESS_TOKEN,
144-
LLMFromHub(HF_ACCESS_TOKEN),
143+
HF_TOKEN,
144+
LLMFromHub(HF_TOKEN),
145145
[...defaultTools]
146146
);
147147

@@ -162,16 +162,16 @@ console.log(messages);
162162
```ts
163163
import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub";
164164

165-
const HF_ACCESS_TOKEN = "hf_...";
165+
const HF_TOKEN = "hf_...";
166166

167167
await createRepo({
168168
repo: "my-user/nlp-model", // or {type: "model", name: "my-user/nlp-test"},
169-
credentials: {accessToken: HF_ACCESS_TOKEN}
169+
credentials: {accessToken: HF_TOKEN}
170170
});
171171

172172
await uploadFile({
173173
repo: "my-user/nlp-model",
174-
credentials: {accessToken: HF_ACCESS_TOKEN},
174+
credentials: {accessToken: HF_TOKEN},
175175
// Can work with native File in browsers
176176
file: {
177177
path: "pytorch_model.bin",
@@ -181,7 +181,7 @@ await uploadFile({
181181

182182
await deleteFiles({
183183
repo: {type: "space", name: "my-user/my-space"}, // or "spaces/my-user/my-space"
184-
credentials: {accessToken: HF_ACCESS_TOKEN},
184+
credentials: {accessToken: HF_TOKEN},
185185
paths: ["README.md", ".gitattributes"]
186186
});
187187
```

packages/agents/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ const uppercaseTool: Tool = {
131131
};
132132

133133
// pass it in the agent
134-
const agent = new HfAgent(process.env.HF_ACCESS_TOKEN,
134+
const agent = new HfAgent(process.env.HF_TOKEN,
135135
LLMFromHub("hf_...", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"),
136136
[uppercaseTool, ...defaultTools]);
137137
```

packages/agents/test/HfAgent.spec.ts

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,20 +4,20 @@ import type { Data } from "../src/types";
44
import type { HfInference } from "@huggingface/inference";
55

66
const env = import.meta.env;
7-
if (!env.HF_ACCESS_TOKEN) {
8-
console.warn("Set HF_ACCESS_TOKEN in the env to run the tests for better rate limits");
7+
if (!env.HF_TOKEN) {
8+
console.warn("Set HF_TOKEN in the env to run the tests for better rate limits");
99
}
1010

1111
describe("HfAgent", () => {
1212
it("You can create an agent from the hub", async () => {
13-
const llm = LLMFromHub(env.HF_ACCESS_TOKEN, "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5");
14-
const agent = new HfAgent(env.HF_ACCESS_TOKEN, llm);
13+
const llm = LLMFromHub(env.HF_TOKEN, "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5");
14+
const agent = new HfAgent(env.HF_TOKEN, llm);
1515
expect(agent).toBeDefined();
1616
});
1717

1818
it("You can create an agent from an endpoint", async () => {
19-
const llm = LLMFromEndpoint(env.HF_ACCESS_TOKEN ?? "", "endpoint");
20-
const agent = new HfAgent(env.HF_ACCESS_TOKEN, llm);
19+
const llm = LLMFromEndpoint(env.HF_TOKEN ?? "", "endpoint");
20+
const agent = new HfAgent(env.HF_TOKEN, llm);
2121
expect(agent).toBeDefined();
2222
});
2323

@@ -42,7 +42,7 @@ describe("HfAgent", () => {
4242
},
4343
};
4444

45-
const agent = new HfAgent(env.HF_ACCESS_TOKEN, undefined, [uppercaseTool, ...defaultTools]);
45+
const agent = new HfAgent(env.HF_TOKEN, undefined, [uppercaseTool, ...defaultTools]);
4646
const code = `
4747
async function generate() {
4848
const output = uppercase("hello friends");
@@ -61,7 +61,7 @@ async function generate() {
6161
message(output);
6262
}`;
6363

64-
const agent = new HfAgent(env.HF_ACCESS_TOKEN);
64+
const agent = new HfAgent(env.HF_TOKEN);
6565

6666
await agent.evaluateCode(code).then((output) => {
6767
expect(output.length).toBeGreaterThan(0);
@@ -75,7 +75,7 @@ async function generate() {
7575
toolThatDoesntExist(aaa);
7676
}`;
7777

78-
const hf = new HfAgent(env.HF_ACCESS_TOKEN);
78+
const hf = new HfAgent(env.HF_TOKEN);
7979

8080
await hf.evaluateCode(code).then((output) => {
8181
expect(output.length).toBeGreaterThan(0);

packages/inference/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -504,7 +504,7 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the
504504
## Running tests
505505

506506
```console
507-
HF_ACCESS_TOKEN="your access token" pnpm run test
507+
HF_TOKEN="your access token" pnpm run test
508508
```
509509

510510
## Finding appropriate models

packages/inference/test/HfInference.spec.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,15 @@ import { readTestFile } from "./test-files";
88
const TIMEOUT = 60000 * 3;
99
const env = import.meta.env;
1010

11-
if (!env.HF_ACCESS_TOKEN) {
12-
console.warn("Set HF_ACCESS_TOKEN in the env to run the tests for better rate limits");
11+
if (!env.HF_TOKEN) {
12+
console.warn("Set HF_TOKEN in the env to run the tests for better rate limits");
1313
}
1414

1515
describe.concurrent(
1616
"HfInference",
1717
() => {
1818
// Individual tests can be ran without providing an api key, however running all tests without an api key will result in rate limiting error.
19-
const hf = new HfInference(env.HF_ACCESS_TOKEN);
19+
const hf = new HfInference(env.HF_TOKEN);
2020

2121
it("throws error if model does not exist", () => {
2222
expect(

packages/inference/test/vcr.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ if (env.VCR_MODE) {
2727

2828
VCR_MODE = env.VCR_MODE as MODE;
2929
} else {
30-
VCR_MODE = env.HF_ACCESS_TOKEN ? MODE.DISABLED : MODE.PLAYBACK;
30+
VCR_MODE = env.HF_TOKEN ? MODE.DISABLED : MODE.PLAYBACK;
3131
}
3232

3333
const originalFetch = globalThis.fetch;

packages/tasks/src/tasks/audio-classification/about.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
5353
```javascript
5454
import { HfInference } from "@huggingface/inference";
5555

56-
const inference = new HfInference(HF_ACCESS_TOKEN);
56+
const inference = new HfInference(HF_TOKEN);
5757
await inference.audioClassification({
5858
data: await (await fetch("sample.flac")).blob(),
5959
model: "facebook/mms-lid-126",

packages/tasks/src/tasks/audio-to-audio/about.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to i
3535
```javascript
3636
import { HfInference } from "@huggingface/inference";
3737

38-
const inference = new HfInference(HF_ACCESS_TOKEN);
38+
const inference = new HfInference(HF_TOKEN);
3939
await inference.audioToAudio({
4040
data: await (await fetch("sample.flac")).blob(),
4141
model: "speechbrain/sepformer-wham",

packages/tasks/src/tasks/automatic-speech-recognition/about.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to t
5454
```javascript
5555
import { HfInference } from "@huggingface/inference";
5656

57-
const inference = new HfInference(HF_ACCESS_TOKEN);
57+
const inference = new HfInference(HF_TOKEN);
5858
await inference.automaticSpeechRecognition({
5959
data: await (await fetch("sample.flac")).blob(),
6060
model: "openai/whisper-large-v2",

0 commit comments

Comments (0)