diff --git a/README.md b/README.md index 7e06498..4e29dfd 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@ USAGE * [`heroku ai:docs`](#heroku-aidocs) * [`heroku ai:models`](#heroku-aimodels) +* [`heroku ai:models:create MODEL_NAME`](#heroku-aimodelscreate-model_name) * [`heroku ai:models:list`](#heroku-aimodelslist) ## `heroku ai:docs` @@ -64,6 +65,35 @@ EXAMPLES $ heroku ai:models:list ``` +## `heroku ai:models:create MODEL_NAME` + +provision access to an AI model + +``` +USAGE + $ heroku ai:models:create [MODEL_NAME] -a [--as ] [--confirm ] [-r ] + +ARGUMENTS + MODEL_NAME The name of the model to provision access for + +FLAGS + -a, --app= (required) The name of the Heroku app to attach the model to + -r, --remote= git remote of app to use + --as= alias name for model resource + --confirm= overwrite existing config vars or existing add-on attachments + +DESCRIPTION + provision access to an AI model + +EXAMPLES + # Provision access to an AI model and attach it to your app with a default name: + $ heroku ai:models:create claude-3-5-sonnet --app example-app + # Provision access to an AI model and attach it to your app with a custom name: + $ heroku ai:models:create stable-diffusion-xl --app example-app --as my_sdxl +``` + +_See code: [dist/commands/ai/models/create.ts](https://github.com/heroku/heroku-cli-plugin-integration/blob/v0.0.0/dist/commands/ai/models/create.ts)_ + ## `heroku ai:models:list` list available AI models to provision access to diff --git a/package.json b/package.json index fcac4f6..b4bf6d3 100644 --- a/package.json +++ b/package.json @@ -11,6 +11,7 @@ "@oclif/core": "^2.16.0", "@oclif/plugin-help": "^5", "open": "^8.4.2", + "printf": "^0.6.1", "tsheredoc": "^1" }, "devDependencies": { diff --git a/src/commands/ai/models/create.ts b/src/commands/ai/models/create.ts new file mode 100644 index 0000000..1ed1367 --- /dev/null +++ b/src/commands/ai/models/create.ts @@ -0,0 +1,84 @@ +import color from '@heroku-cli/color' +import {flags} from '@heroku-cli/command' +import {Args, ux} from '@oclif/core' +import heredoc from 'tsheredoc' +import createAddon from '../../../lib/ai/models/create_addon' +import Command from '../../../lib/base' +import {HerokuAPIError} from '@heroku-cli/command/lib/api-client' + +export default class Create extends Command { + static args = { + model_name: Args.string({ + description: 'The name of the model to provision access for', + required: true, + }), + } + + static description = 'provision access to an AI model' + static example = heredoc` + # Provision access to an AI model and attach it to your app with a default name: + $ heroku ai:models:create claude-3-5-sonnet --app example-app + # Provision access to an AI model and attach it to your app with a custom name: + $ heroku ai:models:create stable-diffusion-xl --app example-app --as my_sdxl + ` + static flags = { + app: flags.app({ + description: 'The name of the Heroku app to attach the model to', + required: true, + }), + as: flags.string({description: 'alias name for model resource'}), + confirm: flags.string({description: 'overwrite existing config vars or existing add-on attachments'}), + remote: flags.remote(), + } + + public async run(): Promise { + const {flags, args} = await this.parse(Create) + const {app, as, confirm} = flags + const {model_name: modelName} = args + + try { + const addon = await createAddon( + this.heroku, + app, + `${this.addonServiceSlug}:${modelName}`, + confirm, + {config: {}, as} + ) + + await this.config.runHook('recache', {type: 'addon', app, addon}) + 
ux.log(`Use ${color.cmd('heroku ai:docs to view documentation')}.`) + } catch (error: unknown) { + this.handleError(error, {as, modelName}) + } + } + + /** + * Error handler + * @param error Error thrown when attempting to create the add-on. + * @param cmdContext Context of the command that failed. + * @returns never + * + * There's a problem with this error handler implementation, because it relies on the specific error message + * returned from API in order to format the error correctly. This is prone to fail if changes are introduced + * upstream on error messages. We should rely on the error `id` but API returns a generic `invalid_params`. + */ + private handleError(error: unknown, cmdContext: {as?: string, modelName?: string} = {}): never { + if (error instanceof HerokuAPIError && error.body.id === 'invalid_params') { + if (error.body.message?.includes('start with a letter')) { + ux.error( + `${cmdContext.as} is an invalid alias name. It must start with a letter and can only contain uppercase letters, numbers, and underscores.`, + {exit: 1}, + ) + } + + if (error.body.message?.includes('add-on plan')) { + ux.error( + `${cmdContext.modelName} is an invalid model name. Run ${color.cmd('heroku ai:models:list')} for a list of valid models.`, + {exit: 1}, + ) + } + } + + throw error + } +} diff --git a/src/lib/ai/models/create_addon.ts b/src/lib/ai/models/create_addon.ts new file mode 100644 index 0000000..f550505 --- /dev/null +++ b/src/lib/ai/models/create_addon.ts @@ -0,0 +1,53 @@ +import {ux} from '@oclif/core' +import color from '@heroku-cli/color' +import * as Heroku from '@heroku-cli/schema' +import {APIClient} from '@heroku-cli/command' +import * as util from './util' + +// eslint-disable-next-line max-params +export default async function ( + heroku: APIClient, + app: string, + plan: string, + confirm: string | undefined, + options: {name?: string, config: Record, as?: string}, +) { + async function createAddonRequest(confirmed?: string) { + const body = { + confirm: confirmed, + name: options.name, + config: options.config, + plan: {name: plan}, + attachment: {name: options.as}, + } + + ux.action.start(`Creating ${plan} on ${color.app(app)}`) + + const {body: addon} = await heroku.post>(`/apps/${app}/addons`, { + body, + headers: { + 'accept-expansion': 'plan', + 'x-heroku-legacy-provider-messages': 'true', + }, + }).catch(error => { + ux.action.stop('') + throw error + }) + + ux.action.stop(color.green(util.formatPriceText(addon.plan?.price || ''))) + + return addon + } + + const addon = await util.trapConfirmationRequired>(app, confirm, confirm => (createAddonRequest(confirm))) + + if (addon.provision_message) { + ux.log(addon.provision_message) + } + + ux.log( + `Added ${addon.config_vars.map((c: string) => color.configVar(c)).join(', ')} to ${color.app(addon.app.name)}` + ) + + return addon +} diff --git a/src/lib/ai/models/util.ts b/src/lib/ai/models/util.ts new file mode 100644 index 0000000..b663cc8 --- /dev/null +++ b/src/lib/ai/models/util.ts @@ -0,0 +1,68 @@ +/* eslint-disable no-return-await */ +import color from '@heroku-cli/color' +import * as Heroku from '@heroku-cli/schema' +import printf from 'printf' +import confirmCommand from '../../confirmCommand' + +export const trapConfirmationRequired = async function (app: string, confirm: string | undefined, fn: (confirmed?: string) => Promise) { + return await fn(confirm) + .catch(async (error: any) => { + if (!error.body || error.body.id !== 'confirmation_required') + throw error + await confirmCommand(app, confirm, 
error.body.message) + return await fn(app) + }) +} + +// This function assumes that price.cents will reflect price per month. +// If the API returns any unit other than month +// this function will need to be updated. +export const formatPrice = function ({price, hourly}: {price: Heroku.AddOn['price'] | number, hourly?: boolean}) { + if (!price) return + if (price.contract) return 'contract' + if (price.cents === 0) return 'free' + + // we are using a standardized 720 hours/month + if (hourly) return `~$${((price.cents / 100) / 720).toFixed(3)}/hour` + + const fmt = price.cents % 100 === 0 ? '$%.0f/%s' : '$%.02f/%s' + return printf(fmt, price.cents / 100, price.unit) +} + +export const formatPriceText = function (price: Heroku.AddOn['price']) { + const priceHourly = formatPrice({price, hourly: true}) + const priceMonthly = formatPrice({price, hourly: false}) + if (!priceHourly) return '' + if (priceHourly === 'free' || priceHourly === 'contract') return `${color.green(priceHourly)}` + + return `${color.green(priceHourly)} (max ${priceMonthly})` +} + +export const grandfatheredPrice = function (addon: Heroku.AddOn) { + const price = addon.plan?.price + return Object.assign({}, price, { + cents: addon.billed_price?.cents, + contract: addon.billed_price?.contract, + }) +} + +export const formatState = function (state: string) { + switch (state) { + case 'provisioned': + state = 'created' + break + case 'provisioning': + state = 'creating' + break + case 'deprovisioning': + state = 'destroying' + break + case 'deprovisioned': + state = 'errored' + break + default: + state = '' + } + + return state +} diff --git a/src/lib/ai/types.ts b/src/lib/ai/types.ts index 927a5e5..da19f90 100644 --- a/src/lib/ai/types.ts +++ b/src/lib/ai/types.ts @@ -41,7 +41,7 @@ export type ModelInfo = { /** * Object schema for Model Status endpoint responses. */ -export type ModelInstance = { +export type ModelResource = { plan: ModelName created: string tokens_in: string diff --git a/src/lib/base.ts b/src/lib/base.ts index de542a4..f5a835c 100644 --- a/src/lib/base.ts +++ b/src/lib/base.ts @@ -9,8 +9,8 @@ import {HerokuAPIError} from '@heroku-cli/command/lib/api-client' export class NotFound extends Error { constructor(addonIdentifier: string, appIdentifier?: string) { const message = heredoc` - We can’t find a model instance called ${color.yellow(addonIdentifier)}${appIdentifier ? ` on ${color.app(appIdentifier)}` : ''}. - Run ${color.cmd(`heroku ai:models:info --app ${appIdentifier ? appIdentifier : ''}`)} to see a list of model instances. + We can’t find a model resource called ${color.yellow(addonIdentifier)}${appIdentifier ? ` on ${color.app(appIdentifier)}` : ''}. + Run ${color.cmd(`heroku ai:models:info --app ${appIdentifier ? appIdentifier : ''}`)} to see a list of model resources. ` super(message) } @@ -22,8 +22,8 @@ export class NotFound extends Error { export class AmbiguousError extends Error { constructor(public readonly matches: string[], addonIdentifier: string, appIdentifier?: string) { const message = heredoc` - Multiple model instances match ${color.yellow(addonIdentifier)}${appIdentifier ? ` on ${color.app(appIdentifier)}` : ''}: ${matches.map(match => color.addon(match)).join(', ')}. - Specify the model instance by its name instead. + Multiple model resources match ${color.yellow(addonIdentifier)}${appIdentifier ? ` on ${color.app(appIdentifier)}` : ''}: ${matches.map(match => color.addon(match)).join(', ')}. + Specify the model resource by its name instead. 
` super(message) } @@ -36,7 +36,6 @@ export default abstract class extends Command { private _addon?: Required private _addonAttachment?: Required private _addonServiceSlug?: string - private _inferenceAddonSlugs = ['inference', 'inference-staging'] private _apiKey?: string private _apiModelId?: string private _apiUrl?: string @@ -62,6 +61,7 @@ export default abstract class extends Command { this._apiModelId = configVars[this.apiModelIdConfigVarName] || this.addon.plan.name?.split(':')[1] // Fallback to plan name (e.g. "inference:claude-3-haiku" => "claude-3-haiku" this._apiUrl = configVars[this.apiUrlConfigVarName] + this._addonServiceSlug = this.addon.addon_service.name this._herokuAI.defaults.host = this.apiUrl this._herokuAI.defaults.headers = { ...defaultHeaders, @@ -174,7 +174,7 @@ export default abstract class extends Command { } // 5. If we resolved for an add-on, check that it's a Managed Inference add-on or throw a NotFound error. - if (resolvedAddon && !this._inferenceAddonSlugs.includes(resolvedAddon.addon_service.name as string)) + if (resolvedAddon && resolvedAddon.addon_service.name !== this.addonServiceSlug) throw new NotFound(addonIdentifier, appIdentifier) // 6. If we resolved for an add-on but not for an attachment yet, try to resolve the attachment @@ -234,17 +234,16 @@ export default abstract class extends Command { } get addonServiceSlug(): string { - if (this._addonServiceSlug) - return this._addonServiceSlug - - ux.error('Heroku AI API Client not configured.', {exit: 1}) + return this._addonServiceSlug || + process.env.HEROKU_INFERENCE_ADDON || + 'inference' } get apiKey(): string { - if (this._apiKey) + if (this.addon && this._apiKey) return this._apiKey - ux.error(`Model instance ${color.addon(this.addon?.name)} isn’t fully provisioned on ${color.app(this.addon?.app.name)}.`, {exit: 1}) + ux.error(`Model resource ${color.addon(this.addon?.name)} isn’t fully provisioned on ${color.app(this.addon?.app.name)}.`, {exit: 1}) } get apiKeyConfigVarName(): string { @@ -260,10 +259,10 @@ export default abstract class extends Command { } get apiUrl(): string { - if (this._apiUrl) + if (this.addon && this._apiUrl) return this._apiUrl - ux.error(`Model instance ${color.addon(this.addon?.name)} isn’t fully provisioned on ${color.app(this.addon?.app.name)}.`, {exit: 1}) + ux.error(`Model resource ${color.addon(this.addon?.name)} isn’t fully provisioned on ${color.app(this.addon?.app.name)}.`, {exit: 1}) } get apiUrlConfigVarName(): string { diff --git a/src/lib/confirmCommand.ts b/src/lib/confirmCommand.ts new file mode 100644 index 0000000..d3b4a2d --- /dev/null +++ b/src/lib/confirmCommand.ts @@ -0,0 +1,29 @@ +import {color} from '@heroku-cli/color' +import {ux} from '@oclif/core' +import heredoc from 'tsheredoc' + +export default async function confirmCommand(app: string, confirm?: string | undefined, message?: string) { + if (confirm) { + if (confirm === app) return + throw new Error(`Confirmation ${color.bold.red(confirm)} did not match ${color.bold.red(app)}. Aborted.`) + } + + if (!message) { + message = heredoc` + Destructive Action. + This command will affect the app ${color.bold.red(app)}. + ` + } + + ux.warn(message) + console.error() + const entered = await ux.prompt( + `To proceed, type ${color.bold.red(app)} or re-run this command with ${color.bold.red('--confirm', app)}`, + {required: true}, + ) + if (entered === app) { + return + } + + throw new Error(`Confirmation did not match ${color.bold.red(app)}. 
Aborted.`) +} diff --git a/test/commands/ai/models/.keep b/test/commands/ai/models/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/test/commands/ai/models/create.test.ts b/test/commands/ai/models/create.test.ts new file mode 100644 index 0000000..07d97da --- /dev/null +++ b/test/commands/ai/models/create.test.ts @@ -0,0 +1,270 @@ +import {ux} from '@oclif/core' +import {CLIError} from '@oclif/core/lib/errors' +import {expect} from 'chai' +import nock from 'nock' +import sinon from 'sinon' +import {stdout, stderr} from 'stdout-stderr' +import Cmd from '../../../../src/commands/ai/models/create' +import {runCommand} from '../../../run-command' +import stripAnsi from '../../../helpers/strip-ansi' +import {addon1Provisioned, addon1ProvisionedWithAttachmentName} from '../../../helpers/fixtures' +import heredoc from 'tsheredoc' + +describe('ai:models:create', function () { + const {env} = process + let api: nock.Scope + let sandbox: sinon.SinonSandbox + + beforeEach(async function () { + process.env = {} + api = nock('https://api.heroku.com:443') + sandbox = sinon.createSandbox() + }) + + afterEach(function () { + process.env = env + api.done() + nock.cleanAll() + sandbox.restore() + }) + + context('when creating a model resource with just the model name argument', function () { + beforeEach(function () { + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:claude-3-haiku'}, + attachment: {}, + }) + .reply(200, addon1Provisioned) + }) + + it('creates the model resource showing the appropriate output', async function () { + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + ]) + expect(stripAnsi(stderr.output)).to.eq(heredoc` + Creating inference:claude-3-haiku on app1... + Creating inference:claude-3-haiku on app1... free + `) + expect(stripAnsi(stdout.output)).to.eq(heredoc` + Heroku AI model resource provisioned successfully + Added INFERENCE_KEY, INFERENCE_MODEL_ID, INFERENCE_URL to app1 + Use heroku ai:docs to view documentation. + `) + }) + }) + + context('when using the --as= option', function () { + beforeEach(function () { + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(200, addon1ProvisionedWithAttachmentName) + }) + + it('creates the model resource passing the specified attachment name', async function () { + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=CLAUDE_HAIKU', + ]) + expect(stripAnsi(stderr.output)).to.eq(heredoc` + Creating inference:claude-3-haiku on app1... + Creating inference:claude-3-haiku on app1... free + `) + expect(stripAnsi(stdout.output)).to.eq(heredoc` + Heroku AI model resource provisioned successfully + Added CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_ID, CLAUDE_HAIKU_URL to app1 + Use heroku ai:docs to view documentation. 
+ `) + }) + }) + + context('when reusing an existing attachment name', function () { + it('requires interactive confirmation if the user didn’t use the --confirm option', async function () { + const prompt = sandbox.stub(ux, 'prompt').resolves('app1') + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(423, { + id: 'confirmation_required', + message: 'Adding CLAUDE_HAIKU to app app1 would overwrite existing vars CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_MODEL_ID, and CLAUDE_HAIKU_URL.', + }) + .post('/apps/app1/addons', { + config: {}, + confirm: 'app1', + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(200, addon1ProvisionedWithAttachmentName) + + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=CLAUDE_HAIKU', + ]) + expect(prompt.calledOnce).to.be.true + expect(stripAnsi(stderr.output)).to.contain('Adding CLAUDE_HAIKU to app app1 would overwrite existing vars') + expect(stripAnsi(stdout.output)).to.eq(heredoc` + Heroku AI model resource provisioned successfully + Added CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_ID, CLAUDE_HAIKU_URL to app1 + Use heroku ai:docs to view documentation. + `) + }) + + it('doesn’t require interactive confirmation if the user used the correct --confirm option', async function () { + const prompt = sandbox.stub(ux, 'prompt') + api + .post('/apps/app1/addons', { + config: {}, + confirm: 'app1', + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(200, addon1ProvisionedWithAttachmentName) + + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=CLAUDE_HAIKU', + '--confirm=app1', + ]) + expect(prompt.calledOnce).to.be.false + expect(stripAnsi(stderr.output)).not.to.contain('Adding CLAUDE_HAIKU to app app1 would overwrite existing vars') + expect(stripAnsi(stdout.output)).to.eq(heredoc` + Heroku AI model resource provisioned successfully + Added CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_ID, CLAUDE_HAIKU_URL to app1 + Use heroku ai:docs to view documentation. + `) + }) + + it('fails if the user provides the wrong confirmation response interactively', async function () { + const prompt = sandbox.stub(ux, 'prompt').resolves('wrong-app-name') + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(423, { + id: 'confirmation_required', + message: 'Adding CLAUDE_HAIKU to app app1 would overwrite existing vars CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_MODEL_ID, and CLAUDE_HAIKU_URL.', + }) + + try { + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=CLAUDE_HAIKU', + ]) + } catch (error: unknown) { + const {message} = error as Error + expect(stripAnsi(message)).to.eq('Confirmation did not match app1. 
Aborted.') + } + + expect(prompt.calledOnce).to.be.true + expect(stripAnsi(stderr.output)).to.contain('Adding CLAUDE_HAIKU to app app1 would overwrite existing vars') + expect(stripAnsi(stdout.output)).to.eq('') + }) + + it('fails if the user provides the wrong --confirmation option value', async function () { + const prompt = sandbox.stub(ux, 'prompt') + api + .post('/apps/app1/addons', { + config: {}, + confirm: 'wrong-app-name', + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'CLAUDE_HAIKU'}, + }) + .reply(423, { + id: 'confirmation_required', + message: 'Adding CLAUDE_HAIKU to app app1 would overwrite existing vars CLAUDE_HAIKU_KEY, CLAUDE_HAIKU_MODEL_ID, and CLAUDE_HAIKU_URL.', + }) + + try { + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=CLAUDE_HAIKU', + '--confirm=wrong-app-name', + ]) + } catch (error: unknown) { + const {message} = error as Error + expect(stripAnsi(message)).to.eq('Confirmation wrong-app-name did not match app1. Aborted.') + } + + expect(prompt.calledOnce).to.be.false + expect(stripAnsi(stderr.output)).not.to.contain('Adding CLAUDE_HAIKU to app app1 would overwrite existing vars') + expect(stripAnsi(stdout.output)).to.eq('') + }) + }) + + context('when using an invalid model name argument', function () { + beforeEach(function () { + const message = 'Couldn\'t find either the add-on service or the add-on plan of "inference:not-a-model-name".' + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:not-a-model-name'}, + attachment: {}, + }) + .reply(422, {id: 'invalid_params', message}) + }) + + it('errors out, showing the appropriate message', async function () { + try { + await runCommand(Cmd, [ + 'not-a-model-name', + '--app=app1', + ]) + } catch (error: unknown) { + const {message, oclif} = error as CLIError + expect(stripAnsi(message)).to.eq( + 'not-a-model-name is an invalid model name. Run heroku ai:models:list for a list of valid models.' + ) + expect(oclif.exit).to.eq(1) + } + + expect(stripAnsi(stdout.output)).to.eq('') + }) + }) + + context('when using an invalid alias name argument', function () { + beforeEach(function () { + const message = 'Name must start with a letter and can only contain uppercase letters, numbers, and underscores.' + api + .post('/apps/app1/addons', { + config: {}, + plan: {name: 'inference:claude-3-haiku'}, + attachment: {name: 'wrong-alias'}, + }) + .reply(422, {id: 'invalid_params', message}) + }) + + it('errors out, showing the appropriate message', async function () { + try { + await runCommand(Cmd, [ + 'claude-3-haiku', + '--app=app1', + '--as=wrong-alias', + ]) + } catch (error: unknown) { + const {message, oclif} = error as CLIError + expect(stripAnsi(message)).to.eq( + 'wrong-alias is an invalid alias name. It must start with a letter and can only contain uppercase letters, numbers, and underscores.' 
+ ) + expect(oclif.exit).to.eq(1) + } + + expect(stripAnsi(stdout.output)).to.eq('') + }) + }) +}) diff --git a/test/commands/ai/models/list.test.ts b/test/commands/ai/models/list.test.ts index 7cacdb6..4171268 100644 --- a/test/commands/ai/models/list.test.ts +++ b/test/commands/ai/models/list.test.ts @@ -8,13 +8,16 @@ import {CLIError} from '@oclif/core/lib/errors' import nock from 'nock' describe('ai:models:list', function () { + const {env} = process let herokuAI: nock.Scope beforeEach(function () { + process.env = {} herokuAI = nock('https://inference.heroku.com') }) afterEach(function () { + process.env = env herokuAI.done() nock.cleanAll() }) @@ -25,15 +28,15 @@ describe('ai:models:list', function () { .reply(200, availableModels) await runCommand(Cmd) - .then(() => expect(stdout.output).to.contain('stable-diffusion-xl Text to image')) - .then(() => expect(stdout.output).to.contain('claude-3-5-sonnet Text to text')) - .then(() => expect(stdout.output).to.contain('claude-3-opus Text to text')) - .then(() => expect(stdout.output).to.contain('claude-3-sonnet Text to text')) - .then(() => expect(stdout.output).to.contain('claude-3-haiku Text to text')) - .then(() => expect(stdout.output).to.contain('cohere-embed-english Text to text, Embedding')) - .then(() => expect(stdout.output).to.contain('cohere-embed-multilingual Text to text, Embedding')) - .then(() => expect(stdout.output).to.contain('See https://devcenter.heroku.com/articles/rainbow-unicorn-princess-models for more info')) - .then(() => expect(stderr.output).to.eq('')) + expect(stdout.output).to.contain('stable-diffusion-xl Text to image') + expect(stdout.output).to.contain('claude-3-5-sonnet Text to text') + expect(stdout.output).to.contain('claude-3-opus Text to text') + expect(stdout.output).to.contain('claude-3-sonnet Text to text') + expect(stdout.output).to.contain('claude-3-haiku Text to text') + expect(stdout.output).to.contain('cohere-embed-english Text to text, Embedding') + expect(stdout.output).to.contain('cohere-embed-multilingual Text to text, Embedding') + expect(stdout.output).to.contain('See https://devcenter.heroku.com/articles/rainbow-unicorn-princess-models for more info') + expect(stderr.output).to.eq('') }) it('warns if no models are available', async function () { diff --git a/test/helpers/fixtures.ts b/test/helpers/fixtures.ts index d0d55b0..9a7156c 100644 --- a/test/helpers/fixtures.ts +++ b/test/helpers/fixtures.ts @@ -207,3 +207,32 @@ export const addon4Attachment1: Heroku.AddOnAttachment = { id: '7ee1ae69-9810-4e4e-8c1d-df96af9625a5', name: 'INFERENCE', } + +export const addon1Provisioned: Heroku.AddOn = { + ...addon1, + config_vars: [ + 'INFERENCE_KEY', + 'INFERENCE_MODEL_ID', + 'INFERENCE_URL', + ], + plan: { + id: '927beee9-dc83-4bcc-b1f7-70c091ece601', + price: { + cents: 0, + unit: 'month', + contract: false, + }, + name: 'inference:claude-3-haiku', + }, + provision_message: 'Heroku AI model resource provisioned successfully', + state: 'provisioned', +} + +export const addon1ProvisionedWithAttachmentName: Heroku.AddOn = { + ...addon1Provisioned, + config_vars: [ + 'CLAUDE_HAIKU_KEY', + 'CLAUDE_HAIKU_ID', + 'CLAUDE_HAIKU_URL', + ], +} diff --git a/test/lib/base.test.ts b/test/lib/base.test.ts index db337a5..aa401a4 100644 --- a/test/lib/base.test.ts +++ b/test/lib/base.test.ts @@ -18,20 +18,20 @@ import {flags} from '@heroku-cli/command' class CommandWithoutConfiguration extends BaseCommand { async run() { - this.herokuAI.get('/models/01234567-89ab-cdef-0123-456789abcdef') + 
this.herokuAI.get('/models/01234567-89ab-cdef-0123-456789abcdef') } } -class CommandConfiguredWithoutInstanceName extends BaseCommand { +class CommandConfiguredWithoutResourceName extends BaseCommand { async run() { await this.configureHerokuAIClient() await this.herokuAI.get('/models') } } -class CommandConfiguredWithInstanceName extends BaseCommand { +class CommandConfiguredWithResourceName extends BaseCommand { static args = { - instance_name: Args.string({required: true}), + resource_name: Args.string({required: true}), } static flags = { @@ -39,12 +39,12 @@ class CommandConfiguredWithInstanceName extends BaseCommand { } async run() { - const {args, flags} = await this.parse(CommandConfiguredWithInstanceName) - const {instance_name: instanceName} = args + const {args, flags} = await this.parse(CommandConfiguredWithResourceName) + const {resource_name: resourceName} = args const {app} = flags - await this.configureHerokuAIClient(instanceName, app) - await this.herokuAI.get(`/models/${this.addon.id}`) + await this.configureHerokuAIClient(resourceName, app) + await this.herokuAI.get(`/models/${this.addon.id}`) } } @@ -82,13 +82,13 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when the command doesn’t require an instance name', function () { + context('when the command doesn’t require a resource name', function () { it('makes a request to the default host', async function () { const defaultApiHost = nock('https://inference.heroku.com') .get('/models') .reply(200, []) - await runCommand(CommandConfiguredWithoutInstanceName) + await runCommand(CommandConfiguredWithoutResourceName) defaultApiHost.done() }) @@ -102,13 +102,13 @@ describe('attempt a request using the Heroku AI client', function () { .get('/models') .reply(200, []) - await runCommand(CommandConfiguredWithoutInstanceName) + await runCommand(CommandConfiguredWithoutResourceName) customApiHost.done() }) }) - context('when the model instance isn’t fully provisioned', function () { + context('when the model resource isn’t fully provisioned', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon1.name, app: null}) @@ -121,12 +121,12 @@ describe('attempt a request using the Heroku AI client', function () { it('returns an error message and exits with a status of 1', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon1.name as string, ]) } catch (error) { const {message, oclif} = error as CLIError - expect(stripAnsi(message)).to.equal('Model instance inference-regular-74659 isn’t fully provisioned on app1.') + expect(stripAnsi(message)).to.equal('Model resource inference-regular-74659 isn’t fully provisioned on app1.') expect(oclif.exit).to.equal(1) } @@ -135,7 +135,7 @@ describe('attempt a request using the Heroku AI client', function () { }) describe('user with unrestricted access to all apps and add-ons', function () { - context('when using an inexistent model instance name and no app', function () { + context('when using an inexistent model resource name and no app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference-inexistent-00001', app: null}) @@ -146,14 +146,14 @@ describe('attempt a request using the Heroku AI client', function () { it('returns a not found error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await 
runCommand(CommandConfiguredWithResourceName, [ 'inference-inexistent-00001', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - We can’t find a model instance called inference-inexistent-00001. - Run heroku ai:models:info --app to see a list of model instances. + We can’t find a model resource called inference-inexistent-00001. + Run heroku ai:models:info --app to see a list of model resources. `) } @@ -161,7 +161,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name with the wrong app', function () { + context('when using an existent model resource name with the wrong app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon1.name, app: 'app2'}) @@ -172,15 +172,15 @@ describe('attempt a request using the Heroku AI client', function () { it('returns a not found error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon1.name as string, '--app=app2', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - We can’t find a model instance called ${addon1.name} on app2. - Run heroku ai:models:info --app app2 to see a list of model instances. + We can’t find a model resource called ${addon1.name} on app2. + Run heroku ai:models:info --app app2 to see a list of model resources. `) } @@ -188,7 +188,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using the add-on service slug and no app, matching multiple model instances', function () { + context('when using the add-on service slug and no app, matching multiple model resources', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference', app: null}) @@ -199,14 +199,14 @@ describe('attempt a request using the Heroku AI client', function () { it('returns an ambiguous identifier error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - Multiple model instances match inference: ${addon1.name}, ${addon2.name}, ${addon3.name}, ${addon4.name}. - Specify the model instance by its name instead. + Multiple model resources match inference: ${addon1.name}, ${addon2.name}, ${addon3.name}, ${addon4.name}. + Specify the model resource by its name instead. 
`) } @@ -214,7 +214,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using the add-on service slug and app, matching a single instance', function () { + context('when using the add-on service slug and app, matching a single resource', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference', app: 'app2'}) @@ -236,7 +236,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon4.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference', '--app=app2', ]) @@ -246,7 +246,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using the add-on plan slug and no app, matching multiple model instances', function () { + context('when using the add-on plan slug and no app, matching multiple model resources', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference:claude-3-opus', app: null}) @@ -257,14 +257,14 @@ describe('attempt a request using the Heroku AI client', function () { it('returns an ambiguous identifier error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference:claude-3-opus', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - Multiple model instances match inference:claude-3-opus: ${addon2.name}, ${addon4.name}. - Specify the model instance by its name instead. + Multiple model resources match inference:claude-3-opus: ${addon2.name}, ${addon4.name}. + Specify the model resource by its name instead. `) } @@ -272,7 +272,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using the add-on plan slug and app, matching a single instance', function () { + context('when using the add-on plan slug and app, matching a single resource', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference:claude-3-opus', app: 'app2'}) @@ -294,7 +294,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon4.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference:claude-3-opus', '--app=app2', ]) @@ -304,7 +304,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using a partial attachment name and app, matching multiple model instance attachments', function () { + context('when using a partial attachment name and app, matching multiple model resource attachments', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'INFERENCE', app: 'app1'}) @@ -319,15 +319,15 @@ describe('attempt a request using the Heroku AI client', function () { it('returns an ambiguous identifier error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'INFERENCE', '--app=app1', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - Multiple model instances match INFERENCE on app1: ${addon2.name}, ${addon3.name}. - Specify the model instance by its name instead. + Multiple model resources match INFERENCE on app1: ${addon2.name}, ${addon3.name}. 
+ Specify the model resource by its name instead. `) } @@ -335,7 +335,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an exact attachment name and app, matching a single instance', function () { + context('when using an exact attachment name and app, matching a single resource', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'INFERENCE_PINK', app: 'app1'}) @@ -357,7 +357,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon2.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'INFERENCE_PINK', '--app=app1', ]) @@ -367,7 +367,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name with multiple attachments to different apps and no app', function () { + context('when using an existent model resource name with multiple attachments to different apps and no app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: null}) @@ -387,7 +387,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, ]) @@ -396,7 +396,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name with multiple attachments to different apps and the billing app', function () { + context('when using an existent model resource name with multiple attachments to different apps and the billing app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: addon3.app?.name}) @@ -416,7 +416,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, '--app=app1', ]) @@ -426,7 +426,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name with multiple attachments to different apps and the attached app', function () { + context('when using an existent model resource name with multiple attachments to different apps and the attached app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: addon3Attachment2.app?.name}) @@ -446,7 +446,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, '--app=app2', ]) @@ -456,7 +456,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name with multiple attachments to the same app', function () { + context('when using an existent model resource name with multiple attachments to the same app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon2.name, app: null}) @@ -476,7 +476,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon2.id}`) 
.reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon2.name as string, ]) @@ -487,7 +487,7 @@ describe('attempt a request using the Heroku AI client', function () { }) describe('user with restricted access to apps and add-ons', function () { - context('when using the add-on service slug, matching a single instance on the accessible app', function () { + context('when using the add-on service slug, matching a single resource on the accessible app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference', app: null}) @@ -509,7 +509,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon4.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference', ]) @@ -518,7 +518,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using the add-on plan slug, matching a single instance on the accessible app', function () { + context('when using the add-on plan slug, matching a single resource on the accessible app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'inference:claude-3-opus', app: null}) @@ -540,7 +540,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon4.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'inference:claude-3-opus', ]) @@ -549,7 +549,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using a partial attachment name and app, matching multiple model instance attachments', function () { + context('when using a partial attachment name and app, matching multiple model resource attachments', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'INFERENCE', app: 'app2'}) @@ -564,15 +564,15 @@ describe('attempt a request using the Heroku AI client', function () { it('returns an ambiguous identifier error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'INFERENCE', '--app=app2', ]) } catch (error) { const {message} = error as Error expect(stripAnsi(message)).to.equal(heredoc` - Multiple model instances match INFERENCE on app2: ${addon3.name}, ${addon4.name}. - Specify the model instance by its name instead. + Multiple model resources match INFERENCE on app2: ${addon3.name}, ${addon4.name}. + Specify the model resource by its name instead. 
`) } @@ -580,7 +580,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an exact attachment name and app, matching a single instance', function () { + context('when using an exact attachment name and app, matching a single resource', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: 'INFERENCE_JADE', app: 'app2'}) @@ -602,7 +602,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ 'INFERENCE_JADE', '--app=app2', ]) @@ -612,7 +612,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name and no app, matching a single instance accessible through the attachment', function () { + context('when using an existent model resource name and no app, matching a single resource accessible through the attachment', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: null}) @@ -636,7 +636,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, ]) @@ -645,7 +645,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name and the non-accessible app', function () { + context('when using an existent model resource name and the non-accessible app', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: 'app1'}) @@ -656,7 +656,7 @@ describe('attempt a request using the Heroku AI client', function () { it('returns a forbidden error message', async function () { try { - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, '--app=app1', ]) @@ -669,7 +669,7 @@ describe('attempt a request using the Heroku AI client', function () { }) }) - context('when using an existent model instance name and the accesible app with the attachment', function () { + context('when using an existent model resource name and the accesible app with the attachment', function () { beforeEach(async function () { api .post('/actions/addons/resolve', {addon: addon3.name, app: 'app2'}) @@ -691,7 +691,7 @@ describe('attempt a request using the Heroku AI client', function () { .get(`/models/${addon3.id}`) .reply(200, {}) - await runCommand(CommandConfiguredWithInstanceName, [ + await runCommand(CommandConfiguredWithResourceName, [ addon3.name as string, '--app=app2', ]) diff --git a/test/lib/confirmCommand.test.ts b/test/lib/confirmCommand.test.ts new file mode 100644 index 0000000..2d76dca --- /dev/null +++ b/test/lib/confirmCommand.test.ts @@ -0,0 +1,55 @@ +/* eslint-disable mocha/no-setup-in-describe */ +import {ux} from '@oclif/core' +import {expect, test} from '@oclif/test' +import stripAnsi from 'strip-ansi' +import confirmCommand from '../../src/lib/confirmCommand' + +describe('confirmApp', function () { + test + .stdout() + .stderr() + .do(() => confirmCommand('app', 'app')) + .it('should not error or prompt with confirm flag match', ({stderr, stdout}) => { + expect(stderr).to.equal('') + expect(stdout).to.equal('') + }) + + test + 
.stdout() + .stderr() + .do(() => confirmCommand('app', 'nope')) + .catch((error: Error) => { + expect(stripAnsi(error.message)).to.equal('Confirmation nope did not match app. Aborted.') + }) + .it('should err on confirm flag mismatch') + + test + .stdout() + .stderr() + .stub(ux, 'prompt', () => Promise.resolve('app')) + .do(() => confirmCommand('app')) + .it('should not err on confirm prompt match', ({stderr, stdout}) => { + expect(stderr).to.contain('Warning: Destructive Action') + expect(stdout).to.equal('') + }) + + const customMessage = 'custom message' + + test + .stdout() + .stderr() + .stub(ux, 'prompt', () => Promise.resolve('app')) + .do(() => confirmCommand('app', undefined, customMessage)) + .it('should display custom message', ({stderr, stdout}) => { + expect(stderr).to.contain(customMessage) + expect(stdout).to.equal('') + }) + + test + .stub(ux, 'prompt', () => Promise.resolve('nope')) + .do(() => confirmCommand('app')) + .catch((error: Error) => { + expect(stripAnsi(error.message)).to.equal('Confirmation did not match app. Aborted.') + }) + .it('should err on confirm prompt mismatch') +}) diff --git a/yarn.lock b/yarn.lock index b2e0093..0d70f63 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6043,6 +6043,11 @@ pretty-bytes@^5.1.0, pretty-bytes@^5.2.0: resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.6.0.tgz#356256f643804773c82f64723fe78c92c62beaeb" integrity sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg== +printf@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/printf/-/printf-0.6.1.tgz#b9afa3d3b55b7f2e8b1715272479fc756ed88650" + integrity sha512-is0ctgGdPJ5951KulgfzvHGwJtZ5ck8l042vRkV6jrkpBzTmb/lueTqguWHy2JfVA+RY6gFVlaZgUS0j7S/dsw== + process-nextick-args@^2.0.0, process-nextick-args@~2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2"
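
The trickiest behavior introduced by this diff is the confirmation round trip: `createAddon` posts to `POST /apps/{app}/addons`, the API may answer with a `confirmation_required` error, and `trapConfirmationRequired` plus `confirmCommand` then either validate the `--confirm` value or prompt interactively before retrying with the app name as the confirmation. The snippet below is a minimal, dependency-free sketch of that flow, not the plugin code itself: `requestAddon`, `ConfirmationRequiredError`, and the readline prompt are illustrative stand-ins for the Platform API call, `HerokuAPIError`, and `ux.prompt`.

```ts
import {createInterface} from 'node:readline/promises'

// Stand-in for the API error shape the plugin inspects (error.body.id).
class ConfirmationRequiredError extends Error {
  constructor(public body: {id: string; message: string}) {
    super(body.message)
  }
}

// Stand-in for `POST /apps/:app/addons`: rejects with `confirmation_required`
// unless the confirm value matches the app name.
async function requestAddon(app: string, plan: string, confirm?: string): Promise<{plan: string}> {
  if (confirm !== app) {
    throw new ConfirmationRequiredError({
      id: 'confirmation_required',
      message: `Adding ${plan} to ${app} would overwrite existing config vars.`,
    })
  }

  return {plan}
}

// Mirrors the shape of trapConfirmationRequired + confirmCommand: try once,
// and on `confirmation_required` confirm (flag or prompt) before retrying.
async function createWithConfirmation(app: string, plan: string, confirm?: string) {
  try {
    return await requestAddon(app, plan, confirm)
  } catch (error) {
    if (!(error instanceof ConfirmationRequiredError) || error.body.id !== 'confirmation_required')
      throw error

    // A --confirm value that doesn't match the app aborts without prompting.
    if (confirm && confirm !== app)
      throw new Error(`Confirmation ${confirm} did not match ${app}. Aborted.`)

    if (!confirm) {
      const rl = createInterface({input: process.stdin, output: process.stderr})
      const entered = await rl.question(`${error.body.message}\nTo proceed, type ${app}: `)
      rl.close()

      if (entered !== app)
        throw new Error(`Confirmation did not match ${app}. Aborted.`)
    }

    // Retry with the app name as the confirm value, as the plugin does.
    return requestAddon(app, plan, app)
  }
}

createWithConfirmation('example-app', 'inference:claude-3-haiku')
  .then(addon => console.log(`Provisioned ${addon.plan}`))
  .catch(error => console.error(error.message))
```

Note the design choice this mirrors: after a successful confirmation, the retry passes the app name itself as the `confirm` field, so the API sees an explicit confirmation on the second attempt; the tests above assert exactly that second request body (`confirm: 'app1'`).

The other small piece of reasoning in the diff is the price formatting in `util.ts`, which assumes `price.cents` is a monthly figure and derives hourly prices from a standardized 720-hour month. A rough sketch of that arithmetic, with `printf` replaced by template strings for the sake of a self-contained example:

```ts
type Price = {cents: number; unit: string; contract?: boolean}

// Cents are per month; hourly output divides by a standardized 720 hours/month.
function formatPrice(price: Price, hourly = false): string {
  if (price.contract) return 'contract'
  if (price.cents === 0) return 'free'
  if (hourly) return `~$${(price.cents / 100 / 720).toFixed(3)}/hour`
  const dollars = price.cents / 100
  return Number.isInteger(dollars) ? `$${dollars}/${price.unit}` : `$${dollars.toFixed(2)}/${price.unit}`
}

console.log(formatPrice({cents: 0, unit: 'month'}))           // "free"
console.log(formatPrice({cents: 25000, unit: 'month'}))       // "$250/month"
console.log(formatPrice({cents: 25000, unit: 'month'}, true)) // "~$0.347/hour"
```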