diff --git a/_import-artifact/.gitattributes b/_import-artifact/.gitattributes new file mode 100644 index 000000000000..3c53eb39d62f --- /dev/null +++ b/_import-artifact/.gitattributes @@ -0,0 +1,2 @@ +# Use Unix line endings in all text files. +* text=auto eol=lf diff --git a/_import-artifact/.github/codecov.yml b/_import-artifact/.github/codecov.yml new file mode 100644 index 000000000000..8a0c37f164ad --- /dev/null +++ b/_import-artifact/.github/codecov.yml @@ -0,0 +1,11 @@ +comment: false +coverage: + status: + project: + default: + informational: true + patch: + default: + informational: true +ignore: + - '**/*.tsx' diff --git a/_import-artifact/.github/dependabot.yml b/_import-artifact/.github/dependabot.yml new file mode 100644 index 000000000000..af8120ff11ce --- /dev/null +++ b/_import-artifact/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: 'github-actions' + directory: '/' + schedule: + # Check for updates to GitHub Actions every week + interval: 'weekly' diff --git a/_import-artifact/.github/typos.toml b/_import-artifact/.github/typos.toml new file mode 100644 index 000000000000..704e8ca37f82 --- /dev/null +++ b/_import-artifact/.github/typos.toml @@ -0,0 +1,2 @@ +[files] +extend-exclude = ["__snapshots__"] \ No newline at end of file diff --git a/_import-artifact/.github/workflows/ci.yml b/_import-artifact/.github/workflows/ci.yml new file mode 100644 index 000000000000..62dea1af077a --- /dev/null +++ b/_import-artifact/.github/workflows/ci.yml @@ -0,0 +1,94 @@ +name: CI + +on: + push: + branches: [main, next] + pull_request: + branches: [main] + +jobs: + ci: + permissions: + id-token: write + # contents permission to update benchmark contents in gh-pages branch + # without benchmarks, would only need read permission + contents: write + # deployments permission to deploy GitHub pages website + deployments: write + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + strategy: + fail-fast: false + matrix: + os: + - 
ubuntu-latest + - windows-latest + - macOS-latest + + steps: + - name: Setup repo + uses: actions/checkout@v4 + with: + submodules: true + + - name: Setup Deno + uses: denoland/setup-deno@v2 + with: + deno-version: v2.x + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + ~/.cache + ~/.cache/deno + key: ${{ runner.os }}-deno-${{ hashFiles('deno.json') }} + + - name: Check typos + if: matrix.os == 'ubuntu-latest' + uses: crate-ci/typos@master + with: + config: ./.github/typos.toml + + - name: Check formatting, linting, and types + run: deno task check + + - name: Test + run: deno task test + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + + + - name: Create lcov file + if: github.ref == 'refs/heads/main' && matrix.os == 'ubuntu-latest' + run: deno task cov:gen + + - name: Upload coverage + if: github.ref == 'refs/heads/main' && matrix.os == 'ubuntu-latest' + uses: codecov/codecov-action@v4 + with: + name: ${{ matrix.os }} + files: cov.lcov + token: ${{ secrets.CODECOV_TOKEN }} + slug: dreamcatcher-tech/artifact + + - name: Run benchmark + if: matrix.os == 'ubuntu-latest' + run: deno task bench | tee output.txt + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + + - name: Store benchmark result + if: github.ref == 'refs/heads/main' && matrix.os == 'ubuntu-latest' + uses: benchmark-action/github-action-benchmark@v1 + with: + name: Artifact Benchmarks + tool: 'benchmarkjs' + output-file-path: output.txt + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + # Show alert with commit comment on detecting possible performance regression + alert-threshold: '200%' + comment-on-alert: true + fail-on-alert: true + diff --git a/_import-artifact/.gitignore b/_import-artifact/.gitignore new file mode 100644 index 000000000000..7eb667567ab0 --- /dev/null +++ b/_import-artifact/.gitignore @@ -0,0 +1,11 @@ +.env +*.DS_Store +cov/ +cov.lcov +.idea +_fresh/ +backup.json +html_cov/ +coverage/ +node_modules/ +tmp/ \ No newline at end of file 
diff --git a/_import-artifact/.gitmodules b/_import-artifact/.gitmodules new file mode 100644 index 000000000000..8b72f644c02f --- /dev/null +++ b/_import-artifact/.gitmodules @@ -0,0 +1,3 @@ +[submodule "HAL"] + path = _import-artifact/HAL + url = git@github.com:dreamcatcher-tech/HAL.git diff --git a/_import-artifact/.vscode/extensions.json b/_import-artifact/.vscode/extensions.json new file mode 100644 index 000000000000..c75f091bcc36 --- /dev/null +++ b/_import-artifact/.vscode/extensions.json @@ -0,0 +1,7 @@ +{ + "recommendations": [ + "denoland.vscode-deno", + "ryanluker.vscode-coverage-gutters", + "dotenv.dotenv-vscode" + ] +} diff --git a/_import-artifact/.vscode/launch.json b/_import-artifact/.vscode/launch.json new file mode 100644 index 000000000000..45e22d92d741 --- /dev/null +++ b/_import-artifact/.vscode/launch.json @@ -0,0 +1,90 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "request": "launch", + "name": "Serve", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "api"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "io", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "watch", "--inspect-wait", "io/io.test.ts"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "longthread", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": [ + "task", + "watch", + "--inspect-wait", + "isolates/longthread.test.ts" + ], + "attachSimplePort": 9229, + "internalConsoleOptions": 
"neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "FS", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "watch", "--inspect-wait", "git/fs.test.ts"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "Engine", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "watch", "--inspect-wait", "engine.test.ts"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "Cloud", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "tc", "--watch", "--inspect-wait"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + }, + { + "request": "launch", + "name": "Launch Program", + "type": "node", + "cwd": "${workspaceFolder}", + "runtimeExecutable": "~/.deno/bin/deno", + "runtimeArgs": ["task", "watch", "--inspect-wait", "${relativeFile}"], + "attachSimplePort": 9229, + "internalConsoleOptions": "neverOpen", + "console": "integratedTerminal" + } + ] +} diff --git a/_import-artifact/.vscode/settings.json b/_import-artifact/.vscode/settings.json new file mode 100644 index 000000000000..b16523d5fcd1 --- /dev/null +++ b/_import-artifact/.vscode/settings.json @@ -0,0 +1,27 @@ +{ + "deno.enable": true, + "deno.lint": true, + "editor.formatOnSave": true, + "editor.defaultFormatter": "denoland.vscode-deno", + "deno.cacheOnSave": true, + "deno.codeLens.references": false, + "deno.codeLens.implementations": false, + "deno.codeLens.referencesAllFunctions": true, + "deno.codeLens.test": true, + "deno.testing.args": ["--allow-all", "--no-check", "--unstable-kv"], + "deno.codeLens.testArgs": ["--allow-all", "--no-check", 
"--unstable-kv"], + "coverage-gutters.coverageFileNames": [ + "cov.lcov", + "lcov.info", + "cov.xml", + "coverage.xml", + "jacoco.xml", + "coverage.cobertura.xml" + ], + "coverage-gutters.showLineCoverage": true, + "coverage-gutters.showRulerCoverage": false, + "coverage-gutters.showGutterCoverage": false, + "[markdown]": { + "editor.defaultFormatter": "esbenp.prettier-vscode" + } +} diff --git a/_import-artifact/HAL b/_import-artifact/HAL new file mode 160000 index 000000000000..abef8f67ed0f --- /dev/null +++ b/_import-artifact/HAL @@ -0,0 +1 @@ +Subproject commit abef8f67ed0f034d4769d44c377a92459f993970 diff --git a/_import-artifact/LICENSE b/_import-artifact/LICENSE new file mode 100644 index 000000000000..0ad25db4bd1d --- /dev/null +++ b/_import-artifact/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code.
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/_import-artifact/README.md b/_import-artifact/README.md new file mode 100644 index 000000000000..9f5c5ff61abb --- /dev/null +++ b/_import-artifact/README.md @@ -0,0 +1,43 @@ +# Artifact + +The memory of the Dreamcatcher + +[![codecov](https://codecov.io/github/dreamcatcher-tech/artifact/graph/badge.svg?token=ZWIRKLK99L)](https://codecov.io/github/dreamcatcher-tech/artifact) + +[![codecov](https://codecov.io/github/dreamcatcher-tech/artifact/graphs/sunburst.svg?token=ZWIRKLK99L)](https://codecov.io/github/dreamcatcher-tech/artifact/tree/main) + +Work occurs on the `next` branch, then when tests pass this is manually merged +into `main`.
+ +## Deployments + +### HAL2 + +- Repo: https://github.com/dreamcatcher-tech/hal2 +- Web: https://hal.dreamcatcher.land +- API: https://healthy-seal-74.deno.dev +- released on: 2024-05-25 +- frozen on: 2024-06-24 + +### HAL3 + +- Repo: https://github.com/dreamcatcher-tech/artifact-ui +- Web: https://hal3.dreamcatcher.land +- API: https://hal3.deno.dev +- released on: 2024-06-24 +- frozen on: 2024-07-13 + +### backchat + +- Repo: https://github.com/dreamcatcher-tech/artifact-ui +- Web: https://backchat.dreamcatcher.land +- API: https://hal3-next.deno.dev +- released on: 2024-07-20 +- frozen on: 2024-07-23 + +### longthreat + +- Repo: https://github.com/dreamcatcher-tech/artifact-ui +- Web: https://longthreat-ui.deno.dev +- API: https://longthreat.deno.dev +- released on: 2024-07-23 diff --git a/_import-artifact/api/client-backchat.ts b/_import-artifact/api/client-backchat.ts new file mode 100644 index 000000000000..1a379cdfc968 --- /dev/null +++ b/_import-artifact/api/client-backchat.ts @@ -0,0 +1,297 @@ +// THIS IS SYNCED FROM THE ARTIFACT PROJECT +// TODO publish to standalone repo +import { + addBranches, + backchatIdRegex, + backchatStateSchema, + EngineInterface, + getParent, + getThreadPath, + IoStruct, + JsonValue, + Params, + PID, + Pierce, + Proctype, + RpcOpts, + SU_ACTOR, + SU_BACKCHAT, + threadSchema, + toActions, + UnsequencedRequest, +} from './types.ts' +import * as actor from './isolates/actor.ts' +import * as files from './isolates/files.ts' +import { ulid } from 'ulid' +import { PierceWatcher } from './watcher.ts' +import { Crypto } from './crypto.ts' +import { ZodObject, ZodTypeAny } from 'zod' +import z from 'zod' +type Init = { + repo: string + isolate?: string + params?: Params +} + +export class Backchat { + readonly #engine: EngineInterface + readonly #crypto: Crypto + readonly #pid: PID + readonly #abort = new AbortController() + readonly #watcher: PierceWatcher + + private constructor(engine: EngineInterface, crypto: Crypto, pid: PID) { + 
this.#engine = engine + this.#crypto = crypto + this.#pid = pid + this.#watcher = PierceWatcher.create(this.#abort.signal, engine, pid) + this.#watcher.watchPierces() + + engine.abortSignal.addEventListener('abort', () => { + this.#abort.abort() + }) + } + /** + * If the resume parameter is provided, the backchat will attempt to resume + * the backchat branch with the given id. If the branch is not found, a new + * branch will be created with a new id. + * @param engine + * @param key The Machine private key + * @param resume the backchat id to attempt to resume + */ + static async upsert(engine: EngineInterface, key: string, resume?: string) { + if (resume && !backchatIdRegex.test(resume)) { + throw new Error('Invalid resume backchat id: ' + resume) + } + const crypto = Crypto.load(key) + const pid = await engine.upsertBackchat(crypto.machineId, resume) + return new Backchat(engine, crypto, pid) + } + static superuser(engine: EngineInterface, crypto: Crypto) { + const pid = addBranches(engine.homeAddress, SU_ACTOR, SU_BACKCHAT) + return new Backchat(engine, crypto, pid) + } + get pid() { + return this.#pid + } + get id() { + const id = this.pid.branches[2] + if (!backchatIdRegex.test(id)) { + throw new Error('Invalid backchat id: ' + id) + } + return id + } + get machineId() { + return this.#crypto.machineId + } + get homeAddress() { + return this.#engine.homeAddress + } + async threadPID() { + const io = await this.readJSON('.io.json') + const state = backchatStateSchema.parse(io.state) + return state.target + } + + /** + * This is the main function that is used to interact with the backchat + * system. The prompt is relayed thru a switchboard agent to select what is + * the best agent to process this message, which will be loaded and asked to + * process this message. + * @param content The optional text that is to be parsed by the AI. It can be + * empty if there are files attached or to indicate a positive response to + * something. 
+ * @param attachments The relative paths to the files that were attached with + * the prompt, which may include directories. May include pointers to the + * tmp files that are created when a user attaches files in the browser. Can + * also include structured data that widgets have prepared. + */ + // TODO adorn with other types of input, like file paths and selections + prompt(content: string, target?: PID, attachments?: string[]) { + const params: Params = { content } + if (attachments) { + params.attachments = attachments + } + if (target) { + params.target = target + } + return this.#backchatAction('prompt', params) + } + newThread() { + return this.#backchatAction('newThread', {}) + } + changeRemote(remote?: PID) { + const params: Params = {} + if (remote) { + params.remote = remote + } + return this.#backchatAction('changeRemote', params) + } + + async actions(isolate: string, opts: RpcOpts = {}) { + const { target = this.#pid, ...procOpts } = opts + const schema = await this.apiSchema(isolate) + const execute = (request: UnsequencedRequest) => this.#action(request) + return toActions(target, isolate, schema, procOpts, execute) + } + #action(request: UnsequencedRequest) { + // TODO if the target is this branch, convert to a direct pierce + return this.#backchatAction('relay', { request }) + } + async #backchatAction(functionName: string, params: Params) { + const pierce: Pierce = { + target: this.#pid, + ulid: ulid(), + isolate: 'backchat', + functionName, + params, + proctype: Proctype.enum.SERIAL, + } + const promise = this.#watcher.watch(pierce.ulid) + // TODO handle an error in pierce + await this.#engine.pierce(pierce) + return await promise + } + ping(params?: { data?: JsonValue }) { + return this.#engine.ping(params?.data) + } + apiSchema(isolate: string) { + return this.#engine.apiSchema(isolate) + } + async transcribe(params: { audio: File }) { + // TODO check account standing to use this feature + if (!(params.audio instanceof File)) { + throw new 
Error('audio must be a File') + } + return await this.#engine.transcribe(params.audio) + } + /** + * Initialize a new repository, optionally installing an isolate with the + * given parameters. + */ + async init({ repo, isolate, params }: Init) { + const actor = await this.#getActor() + return actor.init({ repo, isolate, params }) + } + /** Clone the given repo from github, optionally installing an isolate with + * the given parameters + */ + async clone({ repo, isolate, params }: Init) { + const actor = await this.#getActor() + return actor.clone({ repo, isolate, params }) + } + async pull(params: { repo: string; target: PID }) { + const { repo, target = this.pid } = params + const actor = await this.#getActor() + return actor.pull({ repo, target }) + } + push(params: { pid: PID }) { + return Promise.resolve(params) + } + async rm(params: { repo?: string; all?: boolean }) { + // TODO move this to be rmRepo or something + const actor = await this.#getActor() + const { repo, all } = params + return actor.rm({ repo, all }) + } + async lsRepos() { + const actor = await this.#getActor() + return actor.lsRepos({}) + } + async #getActor() { + const target = getParent(this.#pid) + const actor = await this.actions('actor', { target }) + return actor + } + watch(pid: PID, path?: string, after?: string, signal?: AbortSignal) { + if (after) { + throw new Error('after not implemented') + } + return this.#engine.watch(pid, path, after, signal) + } + read(path: string, pid: PID = this.pid, commit?: string) { + return this.#engine.read(path, pid, commit) + } + readJSON(path: string, pid: PID = this.pid, commit?: string) { + return this.#engine.readJSON(path, pid, commit) + } + readBinary(path: string, pid: PID = this.pid, commit?: string) { + return this.#engine.readBinary(path, pid, commit) + } + async readThread(pid?: PID) { + if (!pid) { + pid = await this.threadPID() + } + const thread = await this.readJSON(getThreadPath(pid), pid) + return threadSchema.parse(thread) + } + 
readTree(path: string, target: PID = this.pid, commit?: string) { + // TODO use the web cache for these calls, both in engine and clients + return this.#engine.readTree(path, target, commit) + } + readMeta(path: string, target: PID = this.pid, commit?: string) { + // this should read the commit for the file + // if commit is given, then it will find the commit PRIOR to the given one + // where he requested file changed + // TODO make this follow renames + // TODO ensure that the order of the tree helps with knowing what changed + // this might be a read splice, where the filepath is provided + console.log('readMeta', path, target, commit) + } + /** + * Retrieves up to `count` splices for a given target PID (`target`). + * + * Starting from the specified `commit`, or the latest commit if none is + * provided, this method traverses the commit history by following parent + * commits. It collects splices associated with each commit. + * + * - If a `path` is specified, only splices where the specified file path was + * changed are included in the result. + * - The traversal continues until the specified number of splices (`count`) + * have been collected or there are no more parent commits to traverse. + * + * @param {PID} target - The target PID for which to retrieve splices. + * @param {Object} [opts={}] - Optional parameters. + * @param {string} [opts.commit] - The commit hash to start from. Defaults to the latest commit. + * @param {string} [opts.path] - If provided, filters splices to those where this path changed. + * @param {number} [opts.count=1] - The maximum number of splices to retrieve. 
+ */ + splices( + target: PID, + opts: { commit?: string; path?: string; count?: number } = {}, + ) { + return this.#engine.splice(target, opts) + } + async state>>( + pid: PID, + schema: T, + ) { + const io = await this.readJSON('.io.json', pid) + return schema.parse(io.state) as z.infer + } + exists(path: string, pid: PID = this.pid) { + return this.#engine.exists(path, pid) + } + writeJSON(path: string, content?: JsonValue, pid: PID = this.pid) { + return this.write(path, JSON.stringify(content), pid) + } + async write(path: string, content?: string, target: PID = this.pid) { + const actions = await this.actions('files', { target }) + return actions.write({ reasoning: [], path, content }) + } + async delete(path: string, target: PID = this.pid): Promise { + const actions = await this.actions('files', { target }) + return actions.rm({ reasoning: [], path }) + } + async ls({ path = '.', target = this.pid }: { path?: string; target?: PID }) { + const tree = await this.readTree(path, target) + return tree.map((entry) => entry.path) + } + deleteAccountUnrecoverably(): Promise { + throw new Error('not implemented') + } + async lsChildren() { + const obj = await this.readJSON('.io.json') + return Object.values(obj.branches) + } +} diff --git a/_import-artifact/api/client-engine.ts b/_import-artifact/api/client-engine.ts new file mode 100644 index 000000000000..82e0ce6cab69 --- /dev/null +++ b/_import-artifact/api/client-engine.ts @@ -0,0 +1,318 @@ +import { pushable } from 'it-pushable' +import { EventSourceParserStream } from 'eventsource-parser/stream' +import { deserializeError } from 'serialize-error' +import { + backchatIdRegex, + EngineInterface, + freezePid, + JsonValue, + Params, + PID, + Pierce, + Splice, + TreeEntry, +} from './types.ts' +import { assert } from '@sindresorhus/is' +import { Crypto } from './crypto.ts' + +export class WebClientEngine implements EngineInterface { + readonly #url: string + readonly #aborts = new Set() + readonly #abort = new 
AbortController() + readonly #fetcher: ( + input: URL | RequestInfo, + init?: RequestInit, + ) => Promise + readonly #homeAddress: PID + readonly #schemas = new Map() + private constructor(url: string, fetcher: typeof fetch, homeAddress: PID) { + if (url.endsWith('/')) { + throw new Error('url should not end with "/": ' + url) + } + this.#url = url + this.#fetcher = fetcher + this.#homeAddress = homeAddress + } + static async start(url: string, fetcher?: typeof fetch) { + if (!fetcher) { + fetcher = (request, opts) => fetch(request, opts) + } + + const homeAddress = await request(fetcher, 'homeAddress', {}, { url }) + freezePid(homeAddress) + return new WebClientEngine(url, fetcher, homeAddress) + } + get homeAddress() { + return this.#homeAddress + } + get abortSignal() { + return this.#abort.signal + } + upsertBackchat(machineId: string, resume?: string): Promise { + Crypto.assert(machineId) + const params: { machineId: string; resume?: string } = { machineId, resume } + if (resume) { + assert.truthy(backchatIdRegex.test(resume), 'invalid resume') + } + return this.#request('upsertBackchat', params) + } + stop() { + this.#abort.abort() + for (const abort of this.#aborts) { + abort.abort('Engine stop') + } + } + async ping(data?: JsonValue) { + // TODO move back to everything being params rather than args + let params = {} + if (data) { + params = { data } + } + const result = await this.#request('ping', params) + if ('data' in result) { + return result.data + } + } + async pierce(pierce: Pierce) { + // TODO lock to only allowing a backchat branch to pierce + await this.#request('pierce', pierce) + } + async apiSchema(isolate: string) { + if (!isolate || typeof isolate !== 'string') { + throw new Error('isolate string is required') + } + if (this.#schemas.has(isolate)) { + return this.#schemas.get(isolate) + } + // TODO rely on the webcache + const result = await this.#request('apiSchema', { isolate }) + this.#schemas.set(isolate, result) + return result + } + 
async transcribe(audio: File) { + const formData = new FormData() + formData.append('audio', audio) + const abort = new AbortController() + this.#aborts.add(abort) + const request = new Request(`${this.#url}/api/transcribe`, { + method: 'POST', + body: formData, + signal: abort.signal, + }) + const response = await this.#fetcher(request) + + const outcome = await response.json() + if (outcome.error) { + throw deserializeError(outcome.error) + } + return outcome.result + } + watch(pid: PID, path?: string, after?: string, signal?: AbortSignal) { + const abort = new AbortController() + this.#aborts.add(abort) + if (signal) { + signal.addEventListener('abort', () => { + abort.abort() + }) + } + + const source = pushable({ objectMode: true }) + abort.signal.addEventListener('abort', () => source.return()) + + const pipe = async () => { + let lastSplice: Splice | undefined + let retryCount = 0 + while (!abort.signal.aborted) { + try { + const request = new Request(`${this.#url}/api/watch`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + pid, + path, + after: lastSplice?.oid || after, + }), + signal: abort.signal, + keepalive: true, + }) + const response = await this.#fetcher(request) + if (!response.ok) { + throw new Error('response not ok') + } + if (!response.body) { + throw new Error('response body is missing') + } + retryCount = 0 + const spliceStream = toEvents(response.body) + for await (const value of toIterable(spliceStream, abort.signal)) { + if (value.event === 'splice') { + const splice: Splice = JSON.parse(value.data) + lastSplice = splice + source.push(splice) + } else if (value.event !== 'keepalive') { + console.error('unexpected event', value.event, value) + } + } + } catch (error) { + console.log('stream error:', error) + } + if (!abort.signal.aborted) { + retryCount++ + const wait = Math.min(1000 * 2 ** retryCount, 30000) + console.log(`retrying read in ${wait}ms`) + await new Promise((resolve) => 
setTimeout(resolve, wait)) + } + } + } + pipe().catch(source.throw) + return source + } + async read(path: string, pid: PID, commit?: string) { + const params: { path: string; pid: PID; commit?: string } = { + path, + pid, + commit, + } + const result = await this.#request('read', params, { cache: !!commit }) + return result as string + } + async readTree(path: string, pid: PID, commit?: string) { + const params: { path: string; pid: PID; commit?: string } = { + path, + pid, + commit, + } + const result = await this.#request('readTree', params, { cache: !!commit }) + assert.array(result) + return result as TreeEntry[] + } + async readJSON(path: string, pid: PID, commit?: string) { + const params: { path: string; pid: PID; commit?: string } = { + path, + pid, + commit, + } + // TODO read the string from server then convert locally + const result = await this.#request('readJSON', params, { cache: !!commit }) + return result as T + } + async readBinary(path: string, pid?: PID, commit?: string) { + const params: { path: string; pid?: PID; commit?: string } = { + path, + pid, + commit, + } + const result = await this.#request('readBinary', params, { + cache: !!commit, + binary: true, + }) + return result as Uint8Array + } + async splice( + target: PID, + opts: { commit?: string; path?: string; count?: number } = {}, + ) { + const params = { target, opts } + const cache = !!opts.commit + const result = await this.#request('splice', params, { cache }) + // TODO run zod schema check + return result as Splice[] + } + async exists(path: string, pid: PID) { + const result = await this.#request('exists', { path, pid }) + return result as boolean + } + async #request( + path: string, + params: Params, + opts: { cache?: boolean; binary?: boolean } = {}, + ) { + const abort = new AbortController() + this.#aborts.add(abort) + const { signal } = abort + const cache = opts.cache + try { + const result = await request(this.#fetcher, path, params, { + url: this.#url, + signal, + 
cache, + binary: opts.binary, + }) + return result + } finally { + this.#aborts.delete(abort) + } + } +} +const request = async ( + fetcher: typeof fetch, + path: string, + params: Params, + opts: { + url: string + signal?: AbortSignal + cache?: boolean + binary?: boolean + }, +) => { + const request = new Request(`${opts.url}/api/${path}?pretty`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(params), + signal: opts.signal, + }) + let response: Response | undefined + let cache: Cache | undefined + if (opts.cache) { + cache = await globalThis.caches.open('web-client-engine') + response = await cache.match(toGetRequest(request, params)) + } + if (!response) { + response = await fetcher(request) + if (!response.ok) { + await response.body?.cancel() + const { status, statusText } = response + const msg = `${path} ${JSON.stringify(params)} ${status} ${statusText}` + throw new Error(msg) + } + if (cache) { + await cache.put(toGetRequest(request, params), response.clone()) + } + } + + if (opts.binary) { + const arrayBuffer = await response.arrayBuffer() + const bytes = new Uint8Array(arrayBuffer) + return bytes + } + const outcome = await response.json() + if (outcome.error) { + throw deserializeError(outcome.error) + } + return outcome.result +} +const toEvents = (stream: ReadableStream) => + stream + .pipeThrough(new TextDecoderStream()) + .pipeThrough(new EventSourceParserStream()) + +async function* toIterable(stream: ReadableStream, signal: AbortSignal) { + const reader = stream.getReader() + signal.addEventListener('abort', () => stream.locked && reader.cancel()) + try { + while (true) { + const { done, value } = await reader.read() + if (done) { + break + } + yield value + } + } finally { + reader.releaseLock() + } +} + +const toGetRequest = (request: Request, params: Record) => { + const url = request.url + '/' + JSON.stringify(params) + return new Request(url, { method: 'GET' }) +} diff --git 
a/_import-artifact/api/crypto.ts b/_import-artifact/api/crypto.ts new file mode 100644 index 000000000000..4e8bf0b017a7 --- /dev/null +++ b/_import-artifact/api/crypto.ts @@ -0,0 +1,38 @@ +import * as secp from '@noble/secp256k1' +import { ripemd160 } from '@noble/hashes/ripemd160' +import { base32 } from 'multiformats/bases/base32' +import { machineIdRegex } from './types.ts' + +export class Crypto { + readonly #privKey: Uint8Array + readonly #pubKey: Uint8Array + readonly #machineId: string + + static load(privateKey: string) { + if (!secp.utils.isValidPrivateKey(privateKey)) { + throw new Error('Invalid private key') + } + return new Crypto(privateKey) + } + static generatePrivateKey() { + return secp.etc.bytesToHex(secp.utils.randomPrivateKey()) + } + static assert(machineId: string) { + if (!machineIdRegex.test(machineId)) { + throw new Error('Invalid machine id: ' + machineId) + } + return true + } + private constructor(privateKey: string) { + this.#privKey = secp.etc.hexToBytes(privateKey) + this.#pubKey = secp.getPublicKey(this.#privKey) + const hash = ripemd160(this.#pubKey) + this.#machineId = 'mac_' + base32.encode(hash) + if (!machineIdRegex.test(this.#machineId)) { + throw new Error('Invalid machine id: ' + this.#machineId) + } + } + get machineId() { + return this.#machineId + } +} diff --git a/_import-artifact/api/isolates/actor.ts b/_import-artifact/api/isolates/actor.ts new file mode 100644 index 000000000000..d74361007321 --- /dev/null +++ b/_import-artifact/api/isolates/actor.ts @@ -0,0 +1,58 @@ +import { z } from 'zod' +import { + backchatIdRegex, + jsonSchema, + machineIdRegex, + md5, + pidSchema, + threadIdRegex, + ToApiType, +} from '../types.ts' + +const repoParams = z.object({ + repo: z.string(), + isolate: z.string().optional(), + params: z.record(jsonSchema).optional(), +}) +const repoResult = z.object({ + pid: pidSchema, + head: md5, +}) + +export const parameters = { + backchat: z.object({ + backchatId: 
z.string().regex(backchatIdRegex), + machineId: z.string().regex(machineIdRegex).optional(), + }).describe( + 'Create a new backchat branch. Optionally if machineId is given, will add to the actors list of machines, which is used during createActor function', + ), + thread: z.object({ threadId: z.string().regex(threadIdRegex) }), + init: repoParams, + clone: repoParams.describe( + 'Clones from github, using the github PAT (if any) for the calling machine. Updates the branch state in the actor branch to store the PID of the clone.', + ), + rm: z.object({ + repo: z.string().optional(), + all: z.boolean().optional().describe('remove all repos for this actor'), + }).refine((p) => p.repo || p.all, { + message: 'must specify repo or all', + }), + lsRepos: z.object({}).describe( + 'List all the repos that this Actor has created', + ), + pull: z.object({ + repo: z.string(), + target: pidSchema.optional(), + }), +} + +export const returns = { + backchat: pidSchema, + thread: pidSchema, + init: repoResult, + clone: repoResult, + rm: z.object({ reposDeleted: z.number() }), + lsRepos: z.array(z.string()), + pull: repoResult, +} +export type Api = ToApiType diff --git a/_import-artifact/api/isolates/files.ts b/_import-artifact/api/isolates/files.ts new file mode 100644 index 000000000000..73a9d24369ad --- /dev/null +++ b/_import-artifact/api/isolates/files.ts @@ -0,0 +1,105 @@ +import { reasoning, ToApiType } from '../types.ts' +import { z } from 'zod' + +export const parameters = { + write: z.object({ + reasoning, + path: z.string().describe('the relative path to the file'), + content: z.string().optional().describe( + 'the contents of the file to be written', + ), + }).describe( + 'Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. 
Returns the number of characters written in the operation', + ), + ls: z.object({ + reasoning, + path: z.string().optional().describe( + 'the relative path to the directory you want to list', + ), + count: z.boolean().optional().describe( + 'count the number of files and return this instead of the names of the files', + ), + all: z.boolean().optional().describe( + 'include all files including hidden files in the operation', + ), + }).describe( + 'List files for a given path. Returns file names with directory names ending in "/". The root ("/") is actually just ".". To count the number of files instead of list them, set "count" to true. To include hidden files in the list or count, set "all" to true.', + ), + read: z.object({ + reasoning, + path: z.string().describe( + 'the relative path to the file you want to read', + ), + }).describe('Read a file. The contents will be returned as a string.'), + update: z.object({ + reasoning, + expectedMatches: z.number().int().gt(0).describe( + 'the expected number of matches for the regex', + ), + path: z.string().describe( + 'the relative path to the file you want to update', + ), + regex: z.string().describe('a regular expression string'), + replacement: z.string().describe('the replacement string'), + }).refine(({ regex }) => { + try { + new RegExp(regex) + return true + } catch (error) { + return !error + } + }) + .describe( + 'Update a file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. Path must be relative.', + ), + rm: z.object({ + reasoning, + path: z.string().describe( + 'the relative path to the file you want to remove', + ), + }).describe('Remove a file. 
This is recursive.'), + mv: z.object({ + reasoning, + from: z.string().describe( + 'the relative path to the file you want to move', + ), + to: z.string().describe( + 'the relative path to the new location of the file', + ), + }).describe('Move a file efficiently. This is a rename operation.'), + cp: z.object({ + reasoning, + from: z.string().describe( + 'the relative path to the file you want to copy', + ), + to: z.string().describe( + 'the relative path to the new location of the file', + ), + }).describe('Copy a file efficiently.'), + search: z.object({ + reasoning, + query: z.string().describe( + 'the relative path to the file or directory you want to find', + ), + }).describe( + 'Search for a file or directory. Returns the relative path to the first match.', + ), +} +export const returns = { + /** The number of bytes written */ + write: z.object({ + charactersWritten: z.number(), + }), + ls: z.union([z.array(z.string()), z.number()]), + read: z.string(), + /** The number of occurrences replaced */ + update: z.object({ matchesUpdated: z.number() }), + rm: z.void(), + mv: z.void(), + cp: z.void(), + search: z.array( + z.object({ path: z.string(), description: z.string() }), + ), +} + +export type Api = ToApiType diff --git a/_import-artifact/api/randomness.ts b/_import-artifact/api/randomness.ts new file mode 100644 index 000000000000..ba268e5721aa --- /dev/null +++ b/_import-artifact/api/randomness.ts @@ -0,0 +1,43 @@ +import { ripemd160 } from '@noble/hashes/ripemd160' +import { base32crockford } from '@scure/base' +import { ulid as _ulid } from 'ulid' + +let deterministicMode = false +let count = 0 +export const _enableDeterministicMockMode = () => { + if (deterministicMode) { + console.error('Deterministic mode already enabled') + throw new Error('Deterministic mode already enabled') + } + deterministicMode = true +} +export const _disableDeterministicMockMode = () => { + if (!deterministicMode) { + console.error('Deterministic mode already disabled') + 
throw new Error('Deterministic mode already disabled') + } + deterministicMode = false + count = 0 +} +export const peekRandomnessCount = () => { + return count +} + +const getSeed = () => { + if (deterministicMode) { + return '' + count++ + } + return _ulid() +} + +export const randomness = () => { + return fixedRandomness(getSeed()) +} + +export const fixedRandomness = (seed: string) => { + const hash = ripemd160(seed) + const encoded = base32crockford.encode(hash) + const result = encoded.slice(-16) + + return result +} diff --git a/_import-artifact/api/tps-report.ts b/_import-artifact/api/tps-report.ts new file mode 100644 index 000000000000..b09d5458aff3 --- /dev/null +++ b/_import-artifact/api/tps-report.ts @@ -0,0 +1,280 @@ +import { md5 } from './types.ts' +import { z } from 'zod' + +export const outcome = z + .object({ + reasoning: z.array(z.string()).describe( + 'the chain of thought reasoning for how the outcome was decided upon', + ), + outcome: z.boolean().describe( + 'the outcome of the test iteration, true if the expectation was met, false if it was not', + ), + analysis: z.array(z.string()).optional().describe( + 'the step by step analysis of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have', + ), + improvements: z.array(z.string()).optional().describe( + 'the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome', + ), + }) + .describe( + 'The assessment outcome of a single test iteration, including reasoning, analysis, and improvements', + ) + +export type TestIteration = z.infer +export const testIteration = z + .object({ + commit: md5.describe('the commit this iteration completed on'), + prompts: z.array(z.string()).describe('the prompt(s) that were used'), + outcomes: z.array(outcome).describe('the outcomes of this iteration'), + }) +const summary = z + .object({ + timestamp: z + .number() + .int() + .gt(1723003530757) + 
.default(Date.now) + .describe('the start time'), + elapsed: z + .number() + .int() + .gte(0) + .describe('the time the operation has been running for in ms'), + iterations: z + .number() + .int() + .gte(0) + .describe('the number of planned iterations to run'), + completed: z + .number() + .int() + .gte(0) + .default(0) + .describe( + 'the lowest number of iterations of a test that have completed. Tests complete asynchronously, so one test might complete all its planned iterations before another test. The overall progress is based on the lowest number of completed iterations', + ), + }) + .describe( + 'A summary of the test results combining all individual results into a ratio', + ) + +export const testCaseSummary = summary + .extend({ + name: z.string().describe('the name of the test case'), + promptLists: z.array(z.array(z.string())) + .describe('the array of prompt arrays used for each iteration'), + expectations: z + .array(z.string()) + .describe('the expectations for this test case'), + dependencies: z.array(z.number().int().gte(0)).describe( + 'Cases that must be run before this case, to set the starting state', + ), + successes: z + .array(z.number().int().gte(0)) + .describe( + 'for each expectation, the sum of the successful outcomes so far. 
When divided by the number of completed iterations, the ratio of successful outcomes is calculated', + ), + }) + .strict() + .describe( + 'A summary of the test results combining all individual results into a ratio', + ) + +export type TestCase = z.infer +export const testCase = z + .object({ + summary: testCaseSummary + .refine((v) => !!v.expectations.length, { + message: 'expectations must be non-empty', + }) + .refine((v) => v.completed <= v.iterations, { + message: 'completed cannot be greater than iterations', + }) + .refine((v) => v.successes.length === v.expectations.length, { + message: 'successes length must equal expectations length', + }) + .refine((v) => v.successes.every((success) => success <= v.completed), { + message: 'successes cannot be greater than completed', + }), + iterations: z.array(testIteration) + .describe('the outcome and info about each test run that has executed'), + }) + .strict() + .describe('summary and runs output of a single test') + .refine( + (v) => + v.iterations.every((run) => + run.outcomes.length === v.summary.expectations.length + ), + { message: 'outcomes count must match expectations count' }, + ) + .refine((v) => v.iterations.length <= v.summary.iterations, { + message: 'runs cannot be greater than iterations', + }) + .refine((v) => v.iterations.length === v.summary.completed, { + message: 'runs must equal completed', + }) + .refine((v) => { + const tally = v.summary.successes.map(() => 0) + for (const run of v.iterations) { + run.outcomes.forEach(({ outcome }, index) => { + if (outcome) { + tally[index]++ + } + }) + } + return v.summary.successes.every((success, index) => + success === tally[index] + ) + }, { message: 'runs outcomes must sum to successes' }) + +export const testFileSummary = summary.extend({ + hash: md5 + .describe('the hash of the test file used to generate the test run'), + path: z.string().regex(/\.test\.md$/) + .describe('the path to the test file, which must end in .test.md'), + target: z.string() 
+ .describe( + 'the path to the target agent file under test, typically something in the agents/ directory', + ), + assessor: z.string() + .describe( + 'the path to the agent file to use to do the assessment of the test outcomes, typically agents/test-assessor.md or something in the agents/ directory', + ), +}) + .strict() + +export type TestFile = z.infer +export const testFile = z + .object({ + summary: testFileSummary + .refine((value) => value.completed <= value.iterations, { + message: 'completed cannot be greater than iterations', + }), + cases: z.array(testCase).describe('the results of each test case'), + }) + .strict() + .refine((v) => { + let index = 0 + for (const testCase of v.cases) { + for (const dependency of testCase.summary.dependencies || []) { + if (dependency >= index) { + return false + } + } + index++ + } + return true + }, { message: '"dependencies" must refer to earlier cases' }) + +export type TestController = z.infer +export const testController = z.object({ + globs: z.array(z.string()).default([]) + .describe('the globs to select the files to run'), + files: z.array(z.object({ + path: z.string(), + status: z.enum(['pending', 'running', 'complete', 'error']), + })).describe( + 'the files to run after resolving the globs, in run order, along with their run status', + ), + concurrency: z.number().int().gt(0).default(1).describe( + 'the number of files to run concurrently', + ), +}).strict() + +export const create = ( + path: string, + target: string, + assessor: string, + iterations: number, + hash: string, +) => { + const blank: TestFile = { + summary: { + timestamp: Date.now(), + path, + hash, + target, + assessor, + elapsed: 0, + iterations, + completed: 0, + }, + cases: [], + } + return testFile.parse(blank) +} + +export const addCase = ( + tpsReport: TestFile, + name: string, + promptLists: string[][], + expectations: string[], + dependencies: number[], +) => { + const parsed = testFile.parse(tpsReport) + const test: TestCase = { + 
summary: { + name, + timestamp: Date.now(), + elapsed: 0, + iterations: parsed.summary.iterations, + promptLists, + expectations, + completed: 0, + successes: Array(expectations.length).fill(0), + dependencies, + }, + iterations: [], + } + parsed.cases.push(test) + return testFile.parse(parsed) +} + +export const addCaseResult = ( + base: TestFile, + caseIndex: number, + caseResult: TestCase, +) => { + const clean = testFile.parse(base) + const test = clean.cases[caseIndex] + if (test.iterations.length) { + throw new Error('iterations already exist for: ' + caseIndex) + } + clean.cases[caseIndex] = caseResult + return updateSummary(clean) +} + +export const addIteration = ( + base: TestFile, + caseIndex: number, + iterationIndex: number, + iteration: TestIteration, +) => { + const clean = testFile.parse(base) + const test = clean.cases[caseIndex] + test.summary.completed++ + test.summary.elapsed = Date.now() - test.summary.timestamp + iteration.outcomes.forEach(({ outcome }, index) => { + if (outcome) { + test.summary.successes[index]++ + } + }) + if (test.iterations[iterationIndex]) { + throw new Error('iteration already exists: ' + iterationIndex) + } + test.iterations[iterationIndex] = iteration + return updateSummary(clean) +} + +const updateSummary = (tpsReport: TestFile) => { + let leastCompleted = Number.MAX_SAFE_INTEGER + for (const test of tpsReport.cases) { + if (test.summary.completed < leastCompleted) { + leastCompleted = test.summary.completed + } + } + tpsReport.summary.completed = leastCompleted + tpsReport.summary.elapsed = Date.now() - tpsReport.summary.timestamp + return testFile.parse(tpsReport) +} diff --git a/_import-artifact/api/types.ts b/_import-artifact/api/types.ts new file mode 100644 index 000000000000..de0c4dc1ae9d --- /dev/null +++ b/_import-artifact/api/types.ts @@ -0,0 +1,907 @@ +// copied from the artifact project +import { Chalk } from 'chalk' +import { z, ZodSchema } from 'zod' +export type { AssistantMessage, CompletionMessage } 
from './zod.ts' +import { completionMessage } from './zod.ts' +import type { Backchat } from './client-backchat.ts' +import { assert } from '@sindresorhus/is' +import type OpenAI from 'openai' +import { randomness } from './randomness.ts' +export { randomness } +type CommitOid = string + +const sequenceInteger = z.number().int().gte(0) +const sequenceKey = z.string().refine((data) => { + try { + return sequenceInteger.safeParse(Number.parseInt(data)).success + } catch (error) { + return !error + } +}, 'sequence key must be an integer') + +export { type Backchat } +export const Proctype = z.enum(['SERIAL', 'BRANCH', 'DAEMON', 'EFFECT']) +// TODO FORGET = 'FORGET', // allow fire and forget actions +// BUT forget needs to be a separate option as we need DAEMON and FORGET +// together to allow for a fire and forget branches +// OR make DAEMON be the same as FORGET since no new info need be returned ? + +export type STATEBOARD_WIDGETS = z.infer +export const STATEBOARD_WIDGETS = z.enum([ + 'TPS_REPORT', + 'FILE_EXPLORER', + 'MARKDOWN_EDITOR', + 'BRANCH_EXPLORER', + 'COMMIT_GRAPH', + 'COMMIT_INFO', + 'THREADS', + 'REPOS', +]) +export const md5 = z.string().regex(/^[a-f0-9]{40}$/, 'Invalid MD5 hash') +export const githubRegex = /^[a-zA-Z\d](?:[a-zA-Z\d]|[-.](?=[a-zA-Z\d])){0,38}$/ +export const repoIdRegex = /^rep_[0-9A-HJKMNP-TV-Z]{16}$/ +export const machineIdRegex = /^mac_[2-7a-z]{33}$/ +export const actorIdRegex = /^act_[0-9A-HJKMNP-TV-Z]{16}$/ +export const backchatIdRegex = /^bac_[0-9A-HJKMNP-TV-Z]{16}$/ +export const threadIdRegex = /^the_[0-9A-HJKMNP-TV-Z]{16}$/ +export const agentHashRegex = /^age_[0-9A-HJKMNP-TV-Z]{16}$/ + +export const SU_ACTOR = 'act_0000000000000000' +export const SU_BACKCHAT = 'bac_0000000000000000' +export const pidSchema = z.object({ + /** + * The hash of the genesis commit is used to identify this repo in a + * cryptographically secure way. 
This repoId is used to reference this repo + * unique with strong guarantees that this is the correct repo that + * communication was intended with. + */ + repoId: z.string().regex(repoIdRegex), + account: z.string().regex(githubRegex), + repository: z.string().regex(githubRegex), + branches: z.array(z.string()).min(1), +}) +export const triad = z.object({ + path: z.string(), + pid: pidSchema, + commit: z.string(), +}) +export type Triad = z.infer +export type ApiFunction = { + (): unknown | Promise + (...args: [{ [key: string]: unknown }]): unknown | Promise +} +export type ApiFunctions = { + [key: string]: ApiFunction +} + +export type IsolateReturn = JsonValue | undefined | void +export type ProcessOptions = { + /** + * Any function called with this option will be executed in parallel + * in a new branch, with no guarantee of order of execution. A call to this + * function will cause 3 commits to occur, 2 of which may be pooled with other + * functions. The commits are: + * 1. The current branch, to declare the function invocation - may be pooled + * 2. The new branch, to conclude the function invocation - may be skippable + * if no fs changes were made + * 3. The current branch, to merge the result back in - may be pooled + * + * Without this option, the functions will be executed in the same + * branch as the caller, and will be executed in the order that any other + * similarly called functions were invoked. + * A call to this function will cause two commits to occur on the current + * branch - the first to store the function call, and the second to store the + * result. Both commits may be shared with other function calls. + */ + branch?: boolean + /** + * Enables Daemon mode, where the branch will not be closed after the process + * is done. + */ + noClose?: boolean + /** Set a prefix for the new branch name, which will be combined with a + * random id and separated by a "-". Implies branch = true + */ + prefix?: string + /** Set the name of the new branch. 
Will error if this exists already */ + branchName?: string + /** Provide file paths that will be deleted in the new branch */ + deletes?: string[] +} + +export type DispatchFunctions = { + [key: string]: (params?: Params) => Promise | unknown +} + +export type IsolateApiSchema = { + [key: string]: object +} + +export const ENTRY_BRANCH = 'main' + +export type PartialPID = Omit + +const int = z.number().int().gte(0) + +const usageSchema = z.object({ + /** + * Number of tokens in the generated completion. + */ + completion_tokens: int, + /** + * Number of tokens in the prompt. + */ + prompt_tokens: int, + /** + * Total number of tokens used in the request (prompt + completion). + */ + total_tokens: int, + /** + * Breakdown of tokens used in a completion. + */ + completion_tokens_details: z.object({ + /** + * Audio input tokens generated by the model. + */ + audio_tokens: int.optional(), + /** + * Tokens generated by the model for reasoning. + */ + reasoning_tokens: int.optional(), + }).optional(), + /** + * Breakdown of tokens used in the prompt. + */ + prompt_tokens_details: z.object({ + /** + * Audio input tokens present in the prompt. + */ + audio_tokens: int.optional(), + /** + * Cached tokens present in the prompt. + */ + cached_tokens: int.optional(), + }).optional(), +}) + +export const messageStatsSchema = z.object({ + /** The Unix timestamp (in seconds) of when the chat completion was created. */ + created: z.number().int().gte(0), + /** The model used for the completion. */ + model: z.string(), + /** The system fingerprint of the completion. */ + system_fingerprint: z.string(), + /** The duration of the completion in milliseconds from our side. */ + duration: z.number().int().gte(0), + /** The duration of the completion in milliseconds from OpenAI's side. + */ + openAiProcessingMs: z.number().int().gte(0), + /** The usage of the completion. 
*/ + usage: usageSchema, +}) + +export const threadSchema = z.object({ + /** The current agent the conversation is with */ + agent: z.string(), + /** The remote thread the conversation is currently with */ + remote: pidSchema.optional(), + /** If the messages were truncated, this is the offset count */ + messageOffset: int, + messages: z.array(completionMessage), + toolCommits: z.record( + /** The tool call id */ + z.string(), + /** The commit hash the tool ended on */ + md5, + ), + messageStats: z.record( + /** The message index that this stat is for */ + z.string(), + messageStatsSchema, + ), + /** Have any files been changed in this threads branch */ + isDirty: z.boolean().optional(), + summaries: z.array( + z.object({ + title: z.string(), + summary: z.string(), + /** The message index that this summary starts with */ + start: z.number().int().gte(0), + /** The message index that this summary ends with */ + end: z.number().int().gte(0).optional(), + }).refine((data) => data.end === undefined || data.end >= data.start, { + message: "'end' must be greater than or equal to 'start'", + path: ['end'], + }), + ).optional(), + /** History of stateboard changes */ + stateboards: z.array(z.object({ + /** What message number set the stateboard change */ + + setter: int, + commit: z.string(), + })), + /** History of what the focus file path was set to (like the CWD). 
Allows + * statements like "the previous file", "that other file", and "three files + * ago" */ + focusedFiles: z.array(z.object({ + /** The message number that set the focus */ + setter: int, + focus: z.object({ + // Define the structure of PathTriad here + }), + })), +}) +export type Thread = z.infer + +export type AssistantsThread = Thread & { + externalId: string + messages: OpenAI.Beta.Threads.Message[] + additionalMessages: OpenAI.Beta.Threads.RunCreateParams.AdditionalMessage[] +} +export type RemoteThread = { + /** The location in the remote repo and the last known commit we have of it */ + triad: Triad +} + +export type PathTriad = { + path: string + pid?: PID + commit?: CommitOid +} + +export const isPierceRequest = (p: Request): p is Pierce => { + return 'ulid' in p +} +export type Params = { [key: string]: JsonValue } + +const literalSchema = z.union([z.string(), z.number(), z.boolean(), z.null()]) +type Literal = z.infer +export type JsonValue = Literal | { [key: string]: JsonValue } | JsonValue[] +export const jsonSchema: z.ZodType = z.lazy(() => + z.union([literalSchema, z.array(jsonSchema), z.record(jsonSchema)]) +) + +const invocation = z.object({ + isolate: z.string(), + functionName: z.string(), + params: z.record(jsonSchema), + proctype: Proctype, + /** + * Allow a custom name for the new branch, if this is a branching request + */ + branch: z.string().optional(), + /** + * If the custom branch name might not be unique, a prefix can be given and + * the sequence number will be appended to the branch name, ensuring + * uniqueness. + */ + branchPrefix: z.string().optional(), + /** + * If the request is a branching request, this will be the name of the new + * branch. If the branch already exists, the request will fail. 
+ */ + branchName: z.string().optional(), + /** Relative paths to delete in the branch */ + deletes: z.array(z.string()).optional(), + effect: z.union([ + z.boolean(), + z.object({ + /** does this side effect have access to the network ? */ + net: z.boolean().optional(), + /** does this side effect have access to the files of the repo ? */ + files: z.boolean().optional(), + /** can this side effect make execution requests in artifact ? */ + artifact: z.boolean().optional(), + /** Specify the maximum time to wait for this side effect to complete */ + timeout: z.number().optional(), + }), + ]).optional(), +}) +export type Invocation = z.infer +/** + * The Process Identifier used to address a specific process branch. + */ +export type PID = z.infer +/** + * A request that has been included in a commit, therefore has a sequence number + */ +export type SolidRequest = z.infer +const solidRequest = invocation.extend({ + target: pidSchema, + source: pidSchema, + sequence: sequenceInteger, +}) + +/** A request that travels between branches */ +export type RemoteRequest = z.infer +export const remoteRequest = solidRequest.extend({ commit: md5 }) + +export type Pierce = z.infer +export const pierceSchema = invocation.extend({ + target: pidSchema, + ulid: z.string(), +}) + +export type UnsequencedRequest = z.infer +export const unsequencedRequest = invocation.extend({ target: pidSchema }) + +export type Request = z.infer +export const requestSchema = z.union([ + pierceSchema, + solidRequest, + remoteRequest, +]) +// TODO remove this by passing ProcessOptions in with the Request +export const getProcType = (procOpts?: ProcessOptions) => { + if (!procOpts) { + return Proctype.enum.SERIAL + } + if (procOpts.noClose) { + return Proctype.enum.DAEMON + } + if ( + procOpts.deletes || procOpts.branch || procOpts.branchName || + procOpts.prefix + ) { + return Proctype.enum.BRANCH + } + return Proctype.enum.SERIAL +} +/** Here is where additional AI models and runner techniques can be 
added */ +export enum AGENT_RUNNERS { + CHAT = 'ai-runner', +} + +export type Change = { + /** If present, represents the unified diff of the file at the given path, + * since the last commit. This is only provided if the file is a string. If + * the changes are too large, this will be missing and an oid will be provided. + */ + patch?: string + /** + * The oid of the object given by path, which can be used to fetch the object + * directly. If this file is binary, then patch will be missing, and the file + * should be directly retrieved. If oid is missing, then the change was + * fatal. + */ + oid?: string +} +export type Splice = { + pid: PID + /** + * The hash of the commit object + */ + oid: string + /** + * The commit this splice refers to + */ + commit: CommitObject + /** + * The timestamp of the commit, or if transient, the timestamp of the write + * that caused this update + */ + timestamp: number + changes: { [key: string]: Change } +} +export declare interface EventSourceMessage { + /** The data received for this message. */ + data: string + /** Event name sent from the server, or `undefined` if none is set for this message. */ + event?: string + /** ID of the message, if any was provided by the server. */ + id?: string +} +/** + * A git commit object. 
+ */ +export type CommitObject = { + /** + * Commit message + */ + message: string + /** + * SHA-1 object id of corresponding file tree + */ + tree: string + /** + * an array of zero or more SHA-1 object ids + */ + parent: string[] + author: { + /** + * The author's name + */ + name: string + /** + * The author's email + */ + email: string + /** + * UTC Unix timestamp in seconds + */ + timestamp: number + /** + * Timezone difference from UTC in minutes + */ + timezoneOffset: number + } + committer: { + /** + * The committer's name + */ + name: string + /** + * The committer's email + */ + email: string + /** + * UTC Unix timestamp in seconds + */ + timestamp: number + /** + * Timezone difference from UTC in minutes + */ + timezoneOffset: number + } + /** + * PGP signature (if present) + */ + gpgsig?: string +} + +export interface EngineInterface { + /** + * The address in use as basis of identity for this engine. May be a repo + * hosted on external engines, or hosted in cooperation with other engines. + */ + homeAddress: PID + abortSignal: AbortSignal + upsertBackchat(machineId: string, resume?: string): Promise + stop(): Promise | void + /** + * Send a ping to the edge isolate that will process requests, to establish + * basic network connectivity. To ping a chain, use an isolate. 
+ * TODO ping should return some info about the deployment + * @param data Data that will be echoed back + */ + ping(data?: JsonValue): Promise + apiSchema(isolate: string): Promise> + transcribe(audio: File): Promise<{ text: string }> + pierce(pierce: Pierce): Promise + watch( + pid: PID, + path?: string, + after?: string, + signal?: AbortSignal, + ): AsyncIterable + splice( + target: PID, + opts?: { commit?: string; path?: string; count?: number }, + ): Promise + read(path: string, pid: PID, commit?: string): Promise + readTree(path: string, pid: PID, commit?: string): Promise + readJSON(path: string, pid: PID, commit?: string): Promise + readBinary(path: string, pid?: PID, commit?: string): Promise + exists(path: string, pid: PID): Promise +} +export const isPID = (value: unknown): value is PID => { + if (typeof value !== 'object' || value === null) { + return false + } + const pid = value as PID + return ( + typeof pid.account === 'string' && + typeof pid.repository === 'string' && + Array.isArray(pid.branches) && + pid.branches.every((branch) => typeof branch === 'string') + ) +} +const { black, red, green, blue, magenta, cyan, bold } = new Chalk({ level: 1 }) +const colors = [red, green, blue, magenta, cyan, black] +let colorIndex = 0 +const colorMap = new Map() +export const colorize = ( + string: string, + noSubstring = false, + noColor = false, +) => { + let sub = string + if (!noSubstring) { + sub = string.substring(0, 7) + } + let index + if (noColor) { + return sub + } + if (colorMap.has(sub)) { + index = colorMap.get(sub)! 
+ } else { + index = colorIndex++ + if (colorIndex === colors.length) { + colorIndex = 0 + } + colorMap.set(sub, index) + } + + return colors[index](bold(sub)) +} +export const print = (pid?: PID, noColor = false) => { + if (!pid) { + return '(no pid)' + } + const branches = pid.branches.map((segment) => { + const noSubstring = !segment.startsWith('mac_') && + !segment.startsWith('bac_') && + !segment.startsWith('act_') && + !segment.startsWith('rep_') && + !segment.startsWith('the_') + return colorize(segment, noSubstring, noColor) + }) + const noSubstring = false + const repoId = colorize(pid.repoId, noSubstring, noColor) + return `${repoId}/${pid.account}/${pid.repository}:${branches.join('/')}` +} +export const printPlain = (pid?: PID) => { + const noColor = true + return print(pid, noColor) +} +export const freezePid = (pid: PID) => { + if (!pid.repoId) { + throw new Error('repoId is required') + } + if (!repoIdRegex.test(pid.repoId)) { + throw new Error('Invalid repoId: ' + pid.repoId) + } + if (!pid.account) { + throw new Error('account is required') + } + if (!pid.repository) { + throw new Error('repository is required') + } + if (!pid.branches[0]) { + throw new Error('branch is required') + } + if (!githubRegex.test(pid.account) || !githubRegex.test(pid.repository)) { + const repo = `${pid.account}/${pid.repository}` + throw new Error('Invalid GitHub account or repository name: ' + repo) + } + Object.freeze(pid) + Object.freeze(pid.branches) + return pid +} +export const partialFromRepo = (repo: string) => { + const [account, repository] = repo.split('/') + const pid: PartialPID = { + account, + repository, + branches: [ENTRY_BRANCH], + } + return pid +} + +export const HAL: Omit = { + account: 'dreamcatcher-tech', + repository: 'HAL', + branches: ['main'], +} +export type RpcOpts = { target?: PID } & ProcessOptions +export const toActions = ( + target: PID, + isolate: string, + schema: IsolateApiSchema, + procOpts: ProcessOptions, + execute: (request: 
UnsequencedRequest) => unknown | Promise, +) => { + procOpts = procOpts || {} + if (procOpts.prefix && procOpts.branchName) { + throw new Error('failed mutex: ' + print(target)) + } + const proctype = getProcType(procOpts) + const actions: DispatchFunctions = {} + for (const functionName of Object.keys(schema)) { + actions[functionName] = (arg1: Params = {}) => { + const params = safeParams(arg1) + const unsequencedRequest: UnsequencedRequest = { + target, + isolate, + functionName, + params, + proctype, + } + if (procOpts.prefix) { + unsequencedRequest.branchPrefix = procOpts.prefix + } + if (procOpts.branchName) { + unsequencedRequest.branchName = procOpts.branchName + } + if (procOpts.deletes) { + unsequencedRequest.deletes = procOpts.deletes + } + return execute(unsequencedRequest) + } + } + return actions as PromisifyFunctionReturnTypes +} +type PromisifyFunctionReturnTypes = { + [K in keyof T]: T[K] extends (...args: infer Args) => infer R + ? (...args: Args) => R extends Promise ? 
R : Promise + : T[K] +} +const safeParams = (params: Params) => { + const safe = { ...params } + for (const key in safe) { + if (safe[key] === undefined) { + delete safe[key] + } + } + checkUndefined(safe) + return safe +} +const checkUndefined = (params: Params) => { + for (const key in params) { + if (params[key] === undefined) { + throw new Error('undefined value: ' + key) + } + if (typeof params[key] === 'object') { + checkUndefined(params[key] as Params) + } + } +} + +export const generateActorId = () => { + return 'act_' + randomness() +} +export const generateBackchatId = () => { + return 'bac_' + randomness() +} + +export const getActorId = (source: PID) => { + const [base, actorId] = source.branches + const parent = { ...source, branches: [base] } + const fullHAL = { ...HAL, repoId: source.repoId } + if (!isPidEqual(parent, fullHAL)) { + throw new Error('source is not a child of HAL') + } + if (!actorIdRegex.test(actorId)) { + throw new Error('Invalid actor id: ' + actorId) + } + return actorId +} +export const getActorPid = (source: PID) => { + const actorId = getActorId(source) + const branches = [source.branches[0], actorId] + return { ...source, branches } +} +export const isActorBranch = (pid: PID) => { + if (pid.branches.length !== 2) { + return false + } + return !!getActorId(pid) +} + +export const isPidEqual = (pid1: PID, pid2: PID) => { + // TODO why not just use the fast-equals utility ? 
+ if (pid1.repoId !== pid2.repoId) { + return false + } + if (pid1.account !== pid2.account) { + return false + } + if (pid1.repository !== pid2.repository) { + return false + } + if (pid1.branches.length !== pid2.branches.length) { + return false + } + for (let i = 0; i < pid1.branches.length; i++) { + if (pid1.branches[i] !== pid2.branches[i]) { + return false + } + } + return true +} +export const META_SYMBOL = Symbol.for('settling commit') +export type Meta = { + parent?: CommitOid + // TODO add the PID so we know what the id of the branch that returned was +} +export const withMeta = async (promise: MetaPromise) => { + const result = await promise + assert.truthy(META_SYMBOL in promise, 'missing commit symbol') + const meta = promise[META_SYMBOL] + assert.object(meta, 'missing meta on promise') + const { parent } = meta + if (parent) { + assert.string(parent, 'missing parent commit') + assert.truthy(sha1.test(parent), 'commit not sha1: ' + parent) + } + return { result, parent } +} +export const sha1 = /^[0-9a-f]{40}$/i +export type MetaPromise = Promise & { [META_SYMBOL]?: Meta } + +export const addBranches = (pid: PID, ...children: string[]) => { + const next = { ...pid, branches: [...pid.branches, ...children] } + return freezePid(next) +} +export const addPeer = (pid: PID, peer: string) => { + const branches = [...pid.branches] + branches.pop() + const next = { ...pid, branches: [...branches, peer] } + return freezePid(next) +} +export const getParent = (pid: PID) => { + const branches = [...pid.branches] + branches.pop() + return freezePid({ ...pid, branches }) +} +export const getRoot = (pid: PID) => { + const branches = [pid.branches[0]] + return freezePid({ ...pid, branches }) +} +export const getBaseName = (pid: PID) => { + return pid.branches[pid.branches.length - 1] +} + +export const getContent = (message: AssistantsThread['messages'][number]) => { + const { content } = message + if (content[0].type !== 'text') { + throw new Error('content not 
text') + } + return content[0].text.value +} +export const getThreadPath = (pid: PID) => { + const [, , ...actorChildBranches] = pid.branches + const threadPath = actorChildBranches.join('/') + const path = `threads/${threadPath}.json` + return path +} + +export const agentConfigSchema = z.object({ + model: z.enum([ + 'gpt-3.5-turbo', + 'gpt-4-turbo', + 'gpt-4o', + 'gpt-4o-mini', + 'o1-preview', + 'o1-mini', + ]), + temperature: z.number().gte(0).lte(2).optional(), + presence_penalty: z.number().optional(), + tool_choice: z.enum(['auto', 'none', 'required']).optional().describe( + 'control model behaviour to force it to call a tool or no tool', + ), + parallel_tool_calls: z.boolean().optional().describe( + 'Is the model permitted to call more than one function at a time. Must be false to use strict function calling', + ), +}) + +export const agentSchema = z.object({ + name: z.string().regex(/^[a-zA-Z0-9_-]+$/), + source: triad.describe('Where exactly did this agent come from'), + description: z.string().optional(), + config: agentConfigSchema, + runner: z.enum(['ai-runner']), + commands: z.array(z.string()), + napps: z.array(z.string()), + instructions: z.string().max(256000), +}) +export type Agent = z.infer + +export const chatParams = agentConfigSchema.extend({ + messages: z.array(completionMessage), + seed: z.literal(1337), + tools: z.array(z.object({ + type: z.literal('function'), + function: z.object({ + name: z.string(), + description: z.string().optional(), + parameters: z.object({}).passthrough().optional(), + strict: z.boolean().optional().nullable(), + }), + })).optional(), +}) +export type ChatParams = z.infer + +export const backchatStateSchema = z.object({ + /** The base thread that this backchat session points to - the thread of last resort */ + target: pidSchema, + threadCount: z.number().int().gte(0), +}) + +export type Returns> = { + [K in keyof T]: ZodSchema +} +// TODO ensure that function return types are inferred from returns object +export 
type ToApiType< + P extends Record, + R extends Returns

, +> = { + [K in keyof P]: ( + params: z.infer, + ) => z.infer | Promise> +} + +export const serializableError = z.object({ + name: z.string().optional(), + message: z.string(), + stack: z.string().optional(), +}) +export type SerializableError = z.infer + +export const outcomeSchema = z.object({ + result: jsonSchema.optional(), + error: serializableError.optional(), +}).refine((data) => { + if (data.error !== undefined) { + return data.result === undefined + } + return true +}, 'result and error are mutually exclusive') + +export type Outcome = { result?: JsonValue; error?: SerializableError } + +export type IoStruct = z.infer +export const ioStruct = z.object({ + sequence: sequenceInteger, + /** The current sequence of the request being executed serially */ + executing: sequenceInteger.optional(), + /** The sequences of requests that have been executed serially */ + executed: z.record(sequenceKey, z.boolean()), + // TODO make the requests be a zod schema + requests: z.record(sequenceKey, requestSchema), + replies: z.record(sequenceKey, outcomeSchema), + /** If a reply is a merge reply, the commit that carried it is stored here */ + parents: z.record(sequenceKey, md5), + /** + * If a request generates child requests, they are tracked here. The commit + * in each entry is the commit that caused the child requests to be generated. + * This is used to replay by resetting the fs to that commit and doing a + * replay. + */ + pendings: z.record( + sequenceKey, + z.array(z.object({ + commit: md5, + sequences: z.array(sequenceInteger), + })), + ), + /** Active branches are stored here. A branch is a daemon if it is listed + * here but its request has been replied to or it is gone from the requests + * list */ + branches: z.record(sequenceKey, z.string()), + /** + * Isolates can store values here and know they will not leak into other + * branches, and will be quick to access since the io file is always loaded. 
+ */ + state: z.record(jsonSchema), +}) +export const reasoning = z.array(z.string()).describe( + 'Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function', +) +export type TreeEntry = { + /** + * - the 6 digit hexadecimal mode + */ + mode: string + /** + * - the name of the file or directory + */ + path: string + /** + * - the SHA-1 object id of the blob or tree + */ + oid: string + /** + * - the type of object + */ + type: 'blob' | 'tree' | 'commit' +} +export const pooledRef = z.object({ + commit: md5, + sequence: sequenceInteger, + source: pidSchema, + isReply: z.boolean(), +}) +export type PooledRef = z.infer diff --git a/_import-artifact/api/watcher.ts b/_import-artifact/api/watcher.ts new file mode 100644 index 000000000000..f1abda3d4219 --- /dev/null +++ b/_import-artifact/api/watcher.ts @@ -0,0 +1,88 @@ +import { EngineInterface, IoStruct, isPierceRequest, PID } from './types.ts' +import { deserializeError } from 'serialize-error' + +type PiercePromise = { + resolve: (value: unknown) => void + reject: (error: Error) => void +} +export class PierceWatcher { + readonly #pierces = new Map() + readonly #engine: EngineInterface + readonly #pid: PID + readonly #abort = new AbortController() + constructor(signal: AbortSignal, engine: EngineInterface, pid: PID) { + this.#engine = engine + this.#pid = pid + + signal.addEventListener('abort', () => { + this.#abort.abort() + }) + engine.abortSignal.addEventListener('abort', () => { + this.#abort.abort() + }) + } + static create(signal: AbortSignal, engine: EngineInterface, pid: PID) { + return new PierceWatcher(signal, engine, pid) + } + watch(ulid: string) { + return new Promise((resolve, reject) => { + this.#pierces.set(ulid, { resolve, reject }) + }) + } + async watchPierces() { + let lastSplice + const after = undefined + const { signal } = this.#abort + const s = this.#engine.watch(this.#pid, 
'.io.json', after, signal) + for await (const splice of s) { + // move these checks to the engine side + if (lastSplice && splice.commit.parent[0] !== lastSplice.oid) { + console.dir(splice, { depth: Infinity }) + console.dir(lastSplice, { depth: Infinity }) + throw new Error('parent mismatch: ' + splice.oid) + } + lastSplice = splice + if (splice.changes['.io.json']) { + const { patch } = splice.changes['.io.json'] + // TODO move to unified diff patches + if (!patch) { + throw new Error('io.json patch not found') + } + const io = JSON.parse(patch) + this.#resolvePierces(io) + } + } + } + #resolvePierces(io: IoStruct) { + for (const [, value] of Object.entries(io.requests)) { + if (isPierceRequest(value)) { + if (this.#pierces.has(value.ulid)) { + const outcome = getOutcomeFor(io, value.ulid) + if (outcome) { + const promise = this.#pierces.get(value.ulid) + this.#pierces.delete(value.ulid) + if (!promise) { + throw new Error('Promise not found') + } + if (outcome.error) { + promise.reject(deserializeError(outcome.error)) + } else { + promise.resolve(outcome.result) + } + } + } + } + } + } +} + +const getOutcomeFor = (io: IoStruct, ulid: string) => { + for (const [key, value] of Object.entries(io.requests)) { + if (isPierceRequest(value)) { + if (value.ulid === ulid) { + const sequence = parseInt(key) + return io.replies[sequence] + } + } + } +} diff --git a/_import-artifact/api/zod.ts b/_import-artifact/api/zod.ts new file mode 100644 index 000000000000..b26b81347e60 --- /dev/null +++ b/_import-artifact/api/zod.ts @@ -0,0 +1,92 @@ +import { z } from 'zod' + +// Define the schema for content parts (text and image) +const ChatCompletionContentPartTextSchema = z.object({ + type: z.literal('text'), + text: z.string(), +}) + +const ChatCompletionContentPartImageSchema = z.object({ + type: z.literal('image_url'), + image_url: z.object({ + url: z.string(), + detail: z.enum(['auto', 'low', 'high']).optional(), + }), +}) + +const ChatCompletionContentPartRefusalSchema = 
z.object({ + refusal: z.string(), + type: z.literal('refusal'), +}) + +const ChatCompletionContentSchema = z.union([ + z.string(), + z.array( + z.union([ + ChatCompletionContentPartTextSchema, + ChatCompletionContentPartImageSchema, + ]), + ), +]) + +const ChatCompletionContentResponseSchema = z.union([ + z.string(), + z.array(ChatCompletionContentPartTextSchema), +]) + +const systemMessage = z.object({ + content: ChatCompletionContentResponseSchema, + role: z.literal('system'), + name: z.string().optional(), +}) + +// Define the user message schema +const userMessage = z.object({ + content: ChatCompletionContentSchema, + role: z.literal('user'), + name: z.string().optional(), +}) + +// Define the assistant message schema +export type AssistantMessage = z.infer< + typeof assistantMessage +> +export const assistantMessage = z.object({ + role: z.literal('assistant'), + content: z.union([ + z.string(), + z.array(z.union([ + ChatCompletionContentPartTextSchema, + ChatCompletionContentPartRefusalSchema, + ])), + ]).optional().nullable(), + name: z.string().optional(), + refusal: z.string().optional().nullable(), + tool_calls: z + .array( + z.object({ + id: z.string(), + type: z.literal('function'), + function: z.object({ + name: z.string(), + arguments: z.string(), + }), + }), + ) + .optional(), +}) + +export type ToolMessage = z.infer +const toolMessage = z.object({ + role: z.literal('tool'), + content: ChatCompletionContentResponseSchema, + tool_call_id: z.string(), +}) + +export type CompletionMessage = z.infer +export const completionMessage = z.union([ + systemMessage, + userMessage, + assistantMessage, + toolMessage, +]) diff --git a/_import-artifact/atomic.ts b/_import-artifact/atomic.ts new file mode 100644 index 000000000000..9b5ecc8df3e3 --- /dev/null +++ b/_import-artifact/atomic.ts @@ -0,0 +1,175 @@ +import * as keys from './keys.ts' +import { hasPoolables } from '@/db.ts' +import { + isMergeReply, + PID, + Poolable, + PooledRef, + QueueMessage, + 
QueueMessageType, + sha1, +} from '@/constants.ts' +import { assert, Debug, isKvTestMode } from '@utils' + +const log = Debug('AI:db:atomic') + +export class Atomic { + // TODO handle max pool size creating too large an atomic transaction + #kv: Deno.Kv + #atomic: Deno.AtomicOperation | undefined + #transmitted = new Map() + private constructor(kv: Deno.Kv) { + this.#kv = kv + this.#atomic = kv.atomic() + } + static create(kv: Deno.Kv) { + return new Atomic(kv) + } + /** + * Used in two scenarios: + * 1. When a pierce occurs, and adding to the pool is at the point of ulid + * generation, guaranteeing atomicity by way of the ulid + * 2. During the atomic transmission process, where it is guaranteed to only + * be added if the commit to head is successful + * @param poolable + * @returns + */ + addToPool(poolable: Poolable) { + assert(this.#atomic, 'Atomic not set') + log('pooling start %o', poolable) + const key = keys.getPoolKey(poolable) + const empty = { key, versionstamp: null } + const { commit, sequence, source } = poolable + const isReply = isMergeReply(poolable) + + const ref: PooledRef = { commit, sequence, source, isReply } + this.#atomic = this.#atomic.check(empty).set(key, ref) + + const poolKey = JSON.stringify(keys.getPoolKeyPrefix(poolable.target)) + if (!this.#transmitted.has(poolKey)) { + this.#transmitted.set(poolKey, { pid: poolable.target, count: BigInt(0) }) + } + const buffer = this.#transmitted.get(poolKey) + assert(buffer) + buffer.count++ + return this + } + deletePool(pid: PID, poolKeys: Deno.KvKey[]) { + assert(this.#atomic, 'Atomic not set') + log('deletePool %o', poolKeys.map((key) => key[key.length - 1])) + for (const key of poolKeys) { + this.#atomic = this.#atomic.delete(key) + } + const markerKey = keys.getPoolMarkerKey(pid) + this.#atomic = this.#atomic.sum(markerKey, BigInt(poolKeys.length)) + return this + } + async updateHead(pid: PID, fromCommit: string, toCommit: string) { + assert(this.#atomic, 'Atomic not set') + 
assert(sha1.test(fromCommit), 'Commit not SHA-1: ' + fromCommit) + assert(sha1.test(toCommit), 'Commit not SHA-1: ' + toCommit) + assert(fromCommit !== toCommit, 'Commits are the same') + const key = keys.getHeadKey(pid) + log('updateHead %o', key, fromCommit, toCommit) + const from = await this.#kv.get(key) + if (from.value !== fromCommit) { + return false + } + this.#atomic = this.#atomic.check(from).set(key, toCommit) + return this + } + createBranch(pid: PID, commit: string) { + // TODO check the pid is based on this pid + assert(this.#atomic, 'Atomic not set') + assert(sha1.test(commit), 'Commit not SHA-1: ' + commit) + const key = keys.getHeadKey(pid) + this.#atomic = this.#atomic.check({ key, versionstamp: null }) + .set(key, commit) + return this + } + /** + * @param pid the branch to delete + * @param commit the current branch commit that is being deleted, which is + * used to provide safety in case something else has changed the branch + */ + async deleteBranch(pid: PID, commit: string) { + assert(this.#atomic, 'Atomic not set') + assert(sha1.test(commit), 'Commit not SHA-1: ' + commit) + const key = keys.getHeadKey(pid) + const current = await this.#kv.get(key) + if (!current.versionstamp || current.value !== commit) { + return false + } + this.#atomic = this.#atomic.check(current).delete(key) + return this + // TODO ensure this is tied in to the changing of the parent head + } + /** Queue up a serial request for execution + * @param commit the commit to provide to the execution environment when the + * request is run + */ + enqueueExecution(pid: PID, sequence: number, commit: string) { + const type = QueueMessageType.EXECUTION + return this.#enqueue({ type, pid, sequence, commit }) + } + enqueueBranch(parentCommit: string, parentPid: PID, sequence: number) { + const type = QueueMessageType.BRANCH + return this.#enqueue({ type, parentCommit, parentPid, sequence }) + } + #enqueuePool(pid: PID) { + const type = QueueMessageType.POOL + return 
this.#enqueue({ type, pid }) + } + #enqueue(message: QueueMessage) { + assert(this.#atomic, 'Atomic not set') + const backoffSchedule = isKvTestMode() ? [] : undefined + this.#atomic = this.#atomic.enqueue(message, { + keysIfUndelivered: [keys.UNDELIVERED], + backoffSchedule, + }) + return this + } + #increasePool(pid: PID, amount: bigint) { + assert(this.#atomic, 'Atomic not set') + const counterKey = keys.getPoolCounterKey(pid) + this.#atomic = this.#atomic.sum(counterKey, amount) + return this + } + #checkMarker(marker: Deno.KvEntryMaybe) { + assert(this.#atomic, 'Atomic not set') + this.#atomic = this.#atomic.check(marker) + return this + } + async commit() { + // TODO go thru all atomic usage and ensure all async ops are at the end + assert(this.#atomic, 'Atomic not set') + + const markerCounterKeys = [] + const transmissions = [...this.#transmitted.values()] + for (const { pid, count } of transmissions) { + markerCounterKeys.push(keys.getPoolMarkerKey(pid)) + const counterKey = keys.getPoolCounterKey(pid) + markerCounterKeys.push(counterKey) + this.#increasePool(pid, count) + } + const markerCounters = await this.#kv.getMany(markerCounterKeys) + for (let i = 0; i < markerCounters.length; i += 2) { + const marker = markerCounters[i] + const counter = markerCounters[i + 1] + + this.#checkMarker(marker) + const mayBeEmpty = true + if (!hasPoolables(counter, marker, mayBeEmpty)) { + // if one was processed, all were processed ☢️ + this.#enqueuePool(transmissions[i].pid) + } + } + const atomic = this.#atomic + this.#atomic = undefined + const result = await atomic.commit() + if (!result.ok) { + return false + } + return true + } +} diff --git a/_import-artifact/benchmarks/bench.ts b/_import-artifact/benchmarks/bench.ts new file mode 100644 index 000000000000..e4aecc9af195 --- /dev/null +++ b/_import-artifact/benchmarks/bench.ts @@ -0,0 +1,206 @@ +import Benchmark from 'benchmark' +import { Debug } from '@utils' +import { Engine } from '@/engine.ts' +import { Api } 
from '@/isolates/io-fixture.ts' +import { assert } from '@std/assert' +import DB from '@/db.ts' +import { Crypto } from '../api/crypto.ts' +import { Backchat } from '../api/client-backchat.ts' + +const log = Debug('AI:benchmarks') +Debug.enable('AI:benchmarks AI:qbr') +log('starting benchmarks') + +const suite = new Benchmark.Suite() + +const superuserKey = + 'ed7a15e43c8ca247b61b61af438392b31b71fe2e9eb500b58e0773fc5eb99b8b' +const aesKey = DB.generateAesKey() + +const engines: Engine[] = [] +const factory = async () => { + const engine = await engineFactory() + const privateKey = Crypto.generatePrivateKey() + const backchat = await Backchat.upsert(engine, privateKey) + return backchat +} +const engineFactory = async () => { + const engine = await Engine.provision(superuserKey, aesKey) + engines.push(engine) + return engine +} +const machineEngine = await engineFactory() +const machineEnginePrivateKey = Crypto.generatePrivateKey() +Crypto.load(machineEnginePrivateKey) // do premul crypto so run is not skewed + +const backchatStartThread = await factory() +const backchatReload = await factory() + +const hotPing = await factory() +const hotPingActions = await hotPing.actions('io-fixture') +const install = await factory() +let installCounter = 0 + +log('setup complete') + +suite + // ENGINE + .add('engine cold start', { + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + const engine = await Engine.provision(superuserKey, aesKey) + deferred.resolve() + await engine.stop() + }, + }) + // MACHINE + .add('machine root session', { // RENAME to: backchat create + // generate a new machine key and await backchat to upsert + defer: true, + async fn(deferred: Benchmark.Deferred) { + const privateKey = Crypto.generatePrivateKey() + await Backchat.upsert(machineEngine, privateKey) + deferred.resolve() + }, + }) + .add('machine reload', { // RENAME to: backchat reload + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + await 
Backchat.upsert(machineEngine, machineEnginePrivateKey) + deferred.resolve() + }, + }) + // SESSION + .add('boot', { // RENAME to: full boot from cold + // start an engine and await backchat to upsert + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + await factory() + deferred.resolve() + }, + }) + .add('session start', { // RENAME to: new thread + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + await backchatStartThread.newThread() + deferred.resolve() + }, + }) + .add('session reload', { // RENAME to: backchat reload + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + await Backchat.upsert( + machineEngine, + machineEnginePrivateKey, + backchatReload.id, + ) + deferred.resolve() + }, + }) + // given an engine that is provisioned, how long to recover the super user ? + + // multihop ping + // write one file then return + // write a deeply nested file then return + + // read a shallow file + // read a deeply nested file externally + // time delay to a splice + + // make a single new daemon branch + // make several new daemon branches + + // run a benchmark from inside an isolate, skipping pierce + // ? how can we map how long a branch takes to make ? + + .add('cold ping', { // RENAME to: create backchat then ping + // make a new session + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + const backchat = await Backchat.upsert( + machineEngine, + machineEnginePrivateKey, + ) + const fixture = await backchat.actions('io-fixture') + const result = await fixture.local({}) + assert(result === 'local reply') + deferred.resolve() + }, + }) + .add('hot ping', { // RENAME to: ping local + // use an existing session + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + const result = await hotPingActions.local({}) + assert(result === 'local reply') + deferred.resolve() + }, + }) + // try get max thruput by not waiting for things to complete ? + // .add('publish', { + // make a new repo ? 
+ // defer: true, + // fn: async (deferred) => { + // await publish() + // deferred.resolve() + // }, + // }) + .add('install', { // RENAME to: init repo + // time how long to install a new multi user app + defer: true, + fn: async (deferred: Benchmark.Deferred) => { + const repo = 'install/' + installCounter++ + await install.init({ repo }) + deferred.resolve() + }, + }) + // .add('add customer', { + // ignoring the NL operations, this is adding raw data + // defer: true, + // fn: async (deferred) => { + // await addCustomer() + // deferred.resolve() + // }, + // }) + // .add('block making', { + // make a new commit ? + // defer: true, + // fn: async (deferred) => { + // await makePulse() + // deferred.resolve() + // }, + // }) + // .add('unsigned block making', { + // make a new commit without signing it + // defer: true, + // fn: async (deferred) => { + // await makeUnsigned() + // deferred.resolve() + // }, + // }) + .on('cycle', (event: Benchmark.Event) => { + console.log(String(event.target)) + }) + .on('complete', async function () { + log('cleaning up') + let count = 0 + for (const engine of engines) { + log('stopping engine', ++count) + await engine.stop() + } + }) + .run({ async: false }) + +// then do a cloud version running on a sacrificial deployment +// hundred pings outside +// hundred pings inside +// time to restart an existing session + +// ? can a GUTS framework be made to run benchmarks in different environments + +// bulk copy files speed test, as well as single binary file test +// bulk file read without cache. 
+ +// multiple stage ping test +// branch and some multiple requests test +// also test the branch task going sequentially diff --git a/_import-artifact/constants.ts b/_import-artifact/constants.ts new file mode 100644 index 000000000000..4662f775b509 --- /dev/null +++ b/_import-artifact/constants.ts @@ -0,0 +1,251 @@ +import IA from './isolate-api.ts' +export type { IA } +export const IO_PATH = '.io.json' +import { + type Backchat, + Change, + CommitObject, + EngineInterface, + IsolateReturn, + MetaPromise, + Outcome, + Params, + PID, + Pierce, + RemoteRequest, + Request, + SolidRequest, + UnsequencedRequest, +} from './api/types.ts' +import FS from '@/git/fs.ts' +import type DB from '@/db.ts' +import type Executor from '@/exe/exe.ts' +import { assert, equal } from '@utils' +import { JsonSchema7ObjectType, zodToJsonSchema } from 'zod-to-json-schema' +import { ZodObject, ZodSchema, ZodUnknown } from 'zod' + +export const REPO_LOCK_TIMEOUT_MS = 5000 + +/** Artifact Context, including the db and executor */ +export type C = { + db: DB + exe: Executor + aesKey?: string + seed?: Deno.KvEntry[] +} + +/** Extends the actions api to be the isolate api */ +export type Functions = { + [K in keyof Api]: Function +} + +type Function = T extends (...args: infer Args) => infer R + ? 
(...args: [...Args, IA]) => R + : never + +export type IsolateFunction = { + (): unknown | Promise + (...args: [Params]): unknown | Promise + (...args: [Params, IA]): unknown | Promise +} + +export type IsolateFunctions = { + [key: string]: IsolateFunction +} +export type IsolateLifecycle = { + '@@mount'?: (api: IA) => Promise | IsolateReturn + '@@unmount'?: (api: IA) => Promise | IsolateReturn +} +export type Isolate = { + parameters: Record>> + returns: Record + functions: IsolateFunctions + lifecycles?: IsolateLifecycle +} + +export type Poolable = MergeReply | RemoteRequest +export type Reply = SolidReply | MergeReply +export type EffectRequest = { + target: PID + /** + * The hash of the function that was called, to ensure repeatability + */ + fingerprint: string + sequence: number +} +export type SolidReply = { + target: PID + sequence: number + outcome: Outcome +} +export type MergeReply = SolidReply & { + /** + * Where did this merge reply come from? + */ + source: PID + /** + * The commit that solidified this merge reply, which is used as a merge + * parent in the recipient branch, so that any changes to the fs can be + * accessed and so the provenance of the action is included. 
+ */ + commit: string +} +export type IsolatePromise = + | BareIsolatePromise + | PromisedIsolatePromise + | SettledIsolatePromise +type BareIsolatePromise = { + request: UnsequencedRequest +} +export type PromisedIsolatePromise = BareIsolatePromise & { + promise: MetaPromise + resolve: (value: T) => void + reject: (error: Error) => void +} +export type SettledIsolatePromise = + & (BareIsolatePromise | PromisedIsolatePromise) + & { + outcome: Outcome + /** if an outcome is given, there must be a commit associated with it, so + * that the execution filesystem can be ticked forwards */ + commit: string + /** If the outcome was the result of a branch returning, then the parent + * commit of that branch is given here */ + parent?: string + } +export const isSettledIsolatePromise = ( + p: IsolatePromise, +): p is SettledIsolatePromise => { + return 'outcome' in p +} +export type Solids = { + oid: string + commit: CommitObject + /** Changed files in this commit. Empty change signals deletion. */ + changes: { [key: string]: Change } + exe?: { request: SolidRequest; sequence: number } + branches: number[] + poolables: (MergeReply | RemoteRequest)[] + deletes: { pid: PID; commit: string }[] +} +export type Branched = { + /** The first request in the new branch */ + origin: SolidRequest + /** The branch PID that needs to be created in /.git/refs */ + pid: PID + /** The head of the new branch that needs to be created in /.git/refs */ + head: string +} +export type ExeResult = ExeSettled | ExePending +type ExeResultBase = { + /** + * The last filesystem that was modified during the execution run. The FS + * might have been bumped forwards if accumulations occurred. 
+ */ + fs: FS + /** If this is a side effect request, this is the lock held by for it */ + effectsLock?: Deno.KvEntry +} +type ExeSettled = ExeResultBase & { + reply: SolidReply +} +type ExePending = ExeResultBase & { + pending: Pending +} +export type Pending = { + /** The commit that caused the requests to be generated */ + commit: string + /** The requests that were generated by the latest round of execution */ + requests: UnsequencedRequest[] + /** The sequence number to accumulate the pending requests against */ + sequence: number +} + +export const isMergeReply = ( + poolable: Poolable | SolidReply, +): poolable is MergeReply => { + return 'commit' in poolable && 'outcome' in poolable +} +export const isReply = ( + poolable: Poolable | Pierce | SolidReply, +): poolable is Reply => { + return 'outcome' in poolable +} +export const isRemoteRequest = ( + poolable: Request, +): poolable is RemoteRequest => { + return 'commit' in poolable && 'proctype' in poolable +} +/** + * Messages that go on the queue are one of three types. Each one is an + * operation that will result in a new commit, atomically. Each operation is + * able to detect when it is a duplicate task due to duplicate message delivery. + * Each task will continue to retry until it is successful, as long as its check + * for duplication reassures it to keep trying. 
+ */ +export type QueueMessage = QueuePool | QueueExe | QueueBranch +export enum QueueMessageType { + POOL = 'pool', + EXECUTION = 'exe', + BRANCH = 'branch', +} +export type QueuePool = { + type: QueueMessageType.POOL + pid: PID +} +export type QueueExe = { + type: QueueMessageType.EXECUTION + pid: PID + commit: string + sequence: number +} +export type QueueBranch = { + type: QueueMessageType.BRANCH + parentCommit: string + parentPid: PID + sequence: number +} +export const isQueuePool = (m: QueueMessage): m is QueuePool => { + return m.type === QueueMessageType.POOL +} +export const isQueueExe = (m: QueueMessage): m is QueueExe => { + return m.type === QueueMessageType.EXECUTION +} +export const isQueueBranch = (m: QueueMessage): m is QueueBranch => { + return m.type === QueueMessageType.BRANCH +} +export const isChildOf = (child: PID, parent: PID) => { + const childParent = { ...child, branches: child.branches.slice(0, -1) } + return equal(childParent, parent) +} +export const isBaseRepo = (pid: PID) => pid.branches.length === 1 + +export type Provisioner = (superBackchat: Backchat) => Promise + +export type CradleMaker = ( + t: Deno.TestContext, + /** The file url that the snapshots are associated with */ + snapshotsFor: string, + updateSnapshots?: 'updateSnapshots', + init?: Provisioner, +) => Promise< + { + backchat: Backchat + engine: EngineInterface + privateKey: string + [Symbol.asyncDispose](): Promise + } +> + +export const toApi = (parameters: Record) => { + const api: Record = {} + for (const key of Object.keys(parameters)) { + const schema = zodToJsonSchema(parameters[key]) + delete schema.$schema + assert('properties' in schema, 'schema must have properties') + api[key] = schema + } + return api +} + +export * from './api/types.ts' diff --git a/_import-artifact/cradle-maker.ts b/_import-artifact/cradle-maker.ts new file mode 100644 index 000000000000..85f05ad6f5e6 --- /dev/null +++ b/_import-artifact/cradle-maker.ts @@ -0,0 +1,60 @@ +import { Engine 
} from './engine.ts' +import { Crypto } from './api/crypto.ts' +import DB from '@/db.ts' +import { type CradleMaker, Provisioner } from '@/constants.ts' +import { Backchat } from './api/client-backchat.ts' +import { mockCreator } from '@/isolates/utils/mocker.ts' +import z from 'zod' +import { peekRandomnessCount, randomness } from '@/api/randomness.ts' + +const superuserKey = Crypto.generatePrivateKey() +const aesKey = DB.generateAesKey() +const privateKey = Crypto.generatePrivateKey() + +type SeedSet = { + count: number + seed: Deno.KvEntry[] + backchatId: string +} +const seeds = new Map() + +export const cradleMaker: CradleMaker = async (t, url, update, init) => { + const mock = mockCreator(z.unknown()) + t.origin = url + mock.useRecorder(t, update) + + const seedSet = seeds.get(init) + const seed = seedSet?.seed + const backchatId = seedSet?.backchatId + + const engine = await Engine.provision(superuserKey, aesKey, init, seed) + const backchat = await Backchat.upsert(engine, privateKey, backchatId) + + if (!seedSet) { + seeds.set(init, { + count: peekRandomnessCount(), + seed: await engine.dump(), + backchatId: backchat.id, + }) + } else { + while (peekRandomnessCount() < seedSet.count) { + // must update the randomness seed if the db seed is injected + randomness() + } + } + + const original = engine.stop.bind(engine) + engine.stop = async () => { + if (t) { + mock.teardown() + } + await original() + } + + return { + backchat, + engine, + privateKey, + [Symbol.asyncDispose]: () => engine.stop(), + } +} diff --git a/_import-artifact/db.test.ts b/_import-artifact/db.test.ts new file mode 100644 index 000000000000..96693a951138 --- /dev/null +++ b/_import-artifact/db.test.ts @@ -0,0 +1,8 @@ +import { expect, isKvTestMode } from '@utils' +import DB from '@/db.ts' + +Deno.test('db', async () => { + const db = await DB.create(DB.generateAesKey()) + expect(isKvTestMode()).toBeTruthy() + db.stop() +}) diff --git a/_import-artifact/db.ts b/_import-artifact/db.ts new 
file mode 100644 index 000000000000..c7bfdebf3640 --- /dev/null +++ b/_import-artifact/db.ts @@ -0,0 +1,509 @@ +import { pushable } from 'it-pushable' +import { BLOB_META_KEY } from '@kitsonk/kv-toolbox/blob' +import { CryptoKv, generateKey } from '@kitsonk/kv-toolbox/crypto' +import * as keys from '@/keys.ts' +import { + freezePid, + MergeReply, + PID, + Poolable, + PooledRef, + pooledRef, + print, + RemoteRequest, + REPO_LOCK_TIMEOUT_MS, + sha1, + Splice, +} from '@/constants.ts' +import { assert, Debug, equal, openKv, posix } from '@utils' +import { Atomic } from './atomic.ts' +import { QueueMessage } from '@/constants.ts' +import { decodeTime, ulid } from 'ulid' +import FS from '@/git/fs.ts' +import IOChannel from '@io/io-channel.ts' + +const log = Debug('AI:db') +export default class DB { + #kvStore: Deno.Kv + #cryptoKv: CryptoKv + #abort = new AbortController() + #aborts = new Set() + + private constructor(kv: Deno.Kv, aesKey: string) { + this.#kvStore = kv + this.#cryptoKv = new CryptoKv(kv, aesKey) + } + static async create(aesKey: string, seed?: Deno.KvEntry[]) { + const kv = await openKv() + watchUndelivered(kv) + const db = new DB(kv, aesKey) + if (seed) { + await db.load(seed) + } + return db + } + static generateAesKey() { + return generateKey() + } + get #kv() { + if (this.#abort.signal.aborted) { + throw new Error('DB is closed') + } + return this.#kvStore + } + stop() { + if (this.#abort.signal.aborted) { + return + } + const kv = this.#kv + this.#abort.abort() + for (const abort of this.#aborts) { + abort.abort() + } + return kv.close() + } + async hasPoolables(target: PID) { + const counterKey = keys.getPoolCounterKey(target) + const markerKey = keys.getPoolMarkerKey(target) + const many = [counterKey, markerKey] + const [counter, marker] = await this.#kv.getMany(many) + return hasPoolables(counter, marker) + } + async getPooledActions(target: PID) { + // TODO should pooled actions be an FS level action ? 
+ const prefix = keys.getPoolKeyPrefix(target) + log('getPooledActions %o', prefix) + const entries = this.#kv.list({ prefix }, { batchSize: 1000 }) + const poolKeys = [] + const pool: Poolable[] = [] + for await (const entry of entries) { + // TODO test if range would not get deeply nested items + if (entry.key.length !== prefix.length + 1) { + continue + } + const id = entry.key[entry.key.length - 1] + if (id === keys.POOL_COUNTER || id === keys.POOL_MARKER) { + continue + } + poolKeys.push(entry.key) + const ref = pooledRef.parse(entry.value) + const poolable = await this.readPoolable(target, ref) + assert(poolable, 'Invalid poolable: ' + entry.key) + pool.push(poolable) + } + log('getPooledActions done %o', poolKeys.length) + return { poolKeys, pool } + } + async readHead(pid: PID): Promise { + const key = keys.getHeadKey(pid) + log('getHead %o', key) + const head = await this.#kv.get(key) + if (head.versionstamp) { + assert(sha1.test(head.value), 'Invalid head: ' + head.value) + return head.value + } + } + async lockRepo(pid: PID) { + const key = keys.getRepoLockKey(pid) + let lockId + do { + lockId = ulid() + const result = await this.#kv.atomic().check({ key, versionstamp: null }) + .set(key, lockId).commit() + if (result.ok) { + // TODO use keepalives to keep re-upping the key + break + } + + for await (const [lock] of this.#kv.watch<[string]>([key])) { + if (!lock.versionstamp) { + break + } + // TODO await the sooner of a new value or a timeout + const time = decodeTime(lock.value) + const remainingMs = REPO_LOCK_TIMEOUT_MS - (Date.now() - time) + if (remainingMs <= 0) { + const { ok } = await this.#kv.atomic().check(lock).delete(key) + .commit() + if (ok) { + console.error('OVERWROTE STALE REPO LOCK: ' + print(pid)) + break + } + } + } + } while (!this.#abort.signal.aborted) + return lockId + } + async releaseRepoLock(pid: PID, lockId: string) { + const key = keys.getRepoLockKey(pid) + const current = await this.#kv.get(key) + if (current.value === 
lockId) { + const { ok } = await this.#kv.atomic().check(current).delete(key).commit() + // TODO loop until confirmed as could error for non check reasons + return ok + } + return false + } + async rm(pid: PID) { + // TODO lock the repo and delete the lock last + const prefix = keys.getRepoBase(pid) + log('rm %o', prefix) + const all = this.#kv.list({ prefix }) + const deletes = [] + for await (const { key } of all) { + log('deleted: ', key) + deletes.push(this.#kv.delete(key)) + } + await Promise.all(deletes) + return !!deletes.length + } + async blobExists(key: Deno.KvKey) { + const meta = await this.#cryptoKv.getBlobMeta(key) + return !!meta.versionstamp + } + async blobGet(key: Deno.KvKey) { + const result = await this.#cryptoKv.getBlob(key) + return result + } + async blobSet(key: Deno.KvKey, value: ArrayBufferLike) { + await this.#cryptoKv.setBlob(key, value) + } + async listImmediateChildren(prefix: Deno.KvKey) { + const results = [] + for await (const item of this.#kv.list({ prefix })) { + const end = item.key.slice(prefix.length) + if (end.length === 1) { + results.push(end[0]) + } + if (end.length === 2) { + // TODO see what happens if the keys are named to clash with this + if (end[1] === BLOB_META_KEY) { + results.push(end[0]) + } + } + } + return results + } + listen(handler: (message: QueueMessage) => Promise) { + this.#kv.listenQueue(handler) + } + atomic() { + return Atomic.create(this.#kv) + } + + /** + * @param pid The branch to watch the effects lock for + * @param abort Trigger this abort if the lock is lost + * @returns + */ + async watchSideEffectsLock(pid: PID, abort: AbortController) { + // rudely snatch the lock + // TODO ensure that releasing the lock is part of atomics + const key = keys.getEffectsLockKey(pid) + const lockId = ulid() + const { versionstamp, ok } = await this.#kv.set(key, lockId) + assert(ok, 'Failed to set lock') + const stream = this.#kv.watch([key]) + const reader = stream.getReader() + 
abort.signal?.addEventListener('abort', () => { + reader.cancel() + }) + const read = async () => { + while (stream.locked) { + const { value, done } = await reader.read() + if (done) { + // should this be abort ? + return + } + const lock = value[0] + if (!lock.versionstamp) { + continue + } + if (lock.versionstamp === versionstamp) { + continue + } + abort.abort() + } + } + read() + return { key, value: lockId, versionstamp } + } + /** + * Will return an async iterable of Splices which are in order and do not skip + * any intermediaries. The parent Splice will always immediately precede the + * child Splice. + * + * If after is not provided, then we use the head to start with. If after is + * provided, then everything up to that point will be retrieved. + * @param {string} pid - The branch to watch + * @param {string} [path] - Optional path of a file to watch + * @param {string} [after] - Optional commit to start from + * @param {AbortSignal} [signal] - Optional abort signal + * @returns + */ + watchSplices(pid: PID, path?: string, after?: string, signal?: AbortSignal) { + freezePid(pid) + if (path) { + assert(!posix.isAbsolute(path), `path must be relative: ${path}`) + } + const abort = new AbortController() + this.#aborts.add(abort) + signal?.addEventListener('abort', () => { + abort.abort() + }) + const sink = pushable({ objectMode: true }) + const buffer = pushable>({ objectMode: true }) + if (after) { + assert(sha1.test(after), 'Invalid after: ' + after) + buffer.push(this.getSplice(pid, after, path)) + } + abort.signal.addEventListener('abort', () => { + this.#aborts.delete(abort) + sink.return() + buffer.return() + }) + + const watch = this.#kv.watch([keys.getHeadKey(pid)]) + const pipe = async () => { + for await (const [result] of streamToIt(watch, abort.signal)) { + if (!result.versionstamp) { + continue + } + const commit = result.value + if (commit === after) { + continue + } + buffer.push(this.getSplice(pid, commit, path)) + } + } + 
pipe().catch(buffer.throw) + const drain = async () => { + let last: Splice | undefined + for await (const promise of buffer) { + const splice = await promise + if (!last) { + if (after) { + assert(after === splice.oid, 'after mismatch') + } else { + sink.push(splice) + } + last = splice + continue + } + + const splices = [splice] + while (splices[0].commit.parent[0] !== last.oid) { + // TODO once piece replies are tracked directly, this can be removed + // catch up should be the responsibility of the client, not the server + console.error('splice race', print(splice.pid), splices.length) + const primeParent = splices[0].commit.parent[0] + const next = await this.getSplice(pid, primeParent, path) + splices.unshift(next) + } + last = splice + splices.map((s) => sink.push(s)) + } + } + drain().catch(sink.throw) + return sink as AsyncIterable + } + // TODO add aborts here, or have a master promise that is awaited + async getSplice(pid: PID, oid: string, path?: string) { + const fs = FS.open(pid, oid, this) + const commit = await fs.getCommit() + const timestamp = commit.committer.timestamp * 1000 + const splice: Splice = { pid, oid, commit, timestamp, changes: {} } + if (path) { + // TODO make paths be an array + if (await fs.exists(path)) { + const oid = await fs.readOid(path) + const patch = await fs.read(path) // TODO check caching makes this fast + splice.changes[path] = { oid, patch } + } + } + return splice + } + + async hasHomeAddress() { + const home = await this.#kv.get(keys.HOME_ADDRESS) + return !!home.versionstamp + } + async setHomeAddress(pid: PID) { + const empty = { key: keys.HOME_ADDRESS, versionstamp: null } + const result = await this.#kv.atomic().check(empty).set( + keys.HOME_ADDRESS, + pid, + ).commit() + if (!result.ok) { + throw new Error('Home address already set') + } + } + async awaitHomeAddress() { + const home = await this.#kv.get(keys.HOME_ADDRESS) + if (home.versionstamp) { + return home.value + } + const watch = 
this.#kv.watch([keys.HOME_ADDRESS]) + for await (const [result] of streamToIt(watch, this.#abort.signal)) { + if (result.versionstamp) { + return result.value + } + } + if (!this.#abort.signal.aborted) { + throw new Error('Home address error') + } + } + async lockDbProvisioning() { + const lockId = ulid() + const key = keys.DB_LOCK + const result = await this.#kv.atomic().check({ key, versionstamp: null }) + .set(key, lockId) + .commit() + if (result.ok) { + return lockId + } + } + async unlockDbProvisioning(lockId: string) { + const key = keys.DB_LOCK + const entry = await this.#kv.get(key) + if (entry.value !== lockId) { + console.error(`lock was taken from ${lockId} to ${entry.value}`) + return + } + const result = await this.#kv.atomic() + .check(entry) + .delete(key) + .commit() + if (!result.ok) { + console.error('unlockDB failed', lockId) + } + } + async awaitDbProvisioning() { + const key = keys.DB_LOCK + const watch = this.#kv.watch([key]) + for await (const [entry] of streamToIt(watch, this.#abort.signal)) { + if (!entry.versionstamp) { + return + } + } + } + async drop() { + const all = this.#kv.list({ prefix: [] }, { batchSize: 1000 }) + const promises = [] + let count = 0 + for await (const { key } of all) { + if (!equal(key, keys.DB_LOCK)) { + promises.push(this.#kv.delete(key)) + } + if (promises.length % 1000 === 0) { + count += promises.length + await Promise.all(promises) + promises.length = 0 + if (count > 0) { + console.log('db drop progress:', count) + } + } + } + await Promise.all(promises) + } + async dump() { + const all = this.#kv.list({ prefix: [] }, { batchSize: 1000 }) + const entries = [] + for await (const entry of all) { + entries.push(entry) + } + return entries + } + async load(entries: Deno.KvEntry[]) { + let atomic = this.#kv.atomic() + let count = 0 + for (const entry of entries) { + count++ + // mutation limit is 1000, but there is also a size limit + if (count % 500 === 0) { + await atomic.commit() + atomic = this.#kv.atomic() 
+ } + atomic.set(entry.key, entry.value) + } + await atomic.commit() + } + async readPoolable(target: PID, ref: PooledRef) { + const { commit, sequence, source, isReply } = ref + const fs = FS.open(source, commit, this) + const io = await IOChannel.loadWithoutPurge(fs) + if (isReply) { + const requestSource = target + const outcome = io.getOutcomeBySource(requestSource, sequence) + const reply: MergeReply = { target, sequence, outcome, source, commit } + return reply + } else { + const request = io.getRequest(sequence) + const remoteRequest: RemoteRequest = { + ...request, + source, + commit, + } + return remoteRequest + } + } +} + +const watchUndelivered = async (kv: Deno.Kv) => { + for await (const [undelivered] of kv.watch([keys.UNDELIVERED])) { + if (undelivered.versionstamp) { + console.error('undelivered', undelivered.key, undelivered.value) + const timestamp = new Date().toISOString() + await kv.set([...keys.UNDELIVERED, timestamp], undelivered.value) + await kv.delete(undelivered.key) + } + } +} + +const streamToIt = (stream: ReadableStream, signal?: AbortSignal) => { + const reader = stream.getReader() + return { + async *[Symbol.asyncIterator]() { + if (signal?.aborted) { + reader.releaseLock() + return + } + signal?.addEventListener('abort', () => { + reader.releaseLock() + stream.cancel() + return + }) + + try { + while (stream.locked) { + const { done, value } = await reader.read() + if (done) { + break + } + yield value + } + } finally { + reader.releaseLock() + } + }, + } +} +export const hasPoolables = ( + counter: Deno.KvEntryMaybe, + marker: Deno.KvEntryMaybe, + mayBeEmpty = false, +) => { + if (!counter.versionstamp) { + if (!mayBeEmpty) { + throw new Error('Pool does not exist: ' + counter.key.join('/')) + } + return false + } + if (!marker.versionstamp) { + return counter.value > 0 + } + if (counter.value < marker.value) { + throw new Error('invalid marker: ' + marker.key.join('/')) + } + return counter.value > marker.value +} diff --git 
a/_import-artifact/deno.json b/_import-artifact/deno.json new file mode 100644 index 000000000000..e88c998d896c --- /dev/null +++ b/_import-artifact/deno.json @@ -0,0 +1,79 @@ +{ + "name": "@dreamcatcher-ai/artifact", + "version": "0.0.1", + "lock": false, + "tasks": { + "test": "DENO_KV_PATH=:memory: deno test -A --coverage --clean --parallel", + "watch": "clear && DENO_KV_PATH=:memory: DEBUG_COLORS=1 deno test -A --watch", + "api": "DENO_KV_PATH=:memory: DEBUG_COLORS=1 deno run -A --watch --inspect-wait main.ts", + "check:types": "deno check **/*.ts", + "check": "deno fmt --check && deno lint && deno task check:types", + "ok": "deno task check && deno task test", + "cov:gen": "deno coverage coverage --lcov --exclude='.tsx' --output=cov.lcov", + "tc": "DEBUG_COLORS=1 deno test -A --unsafely-ignore-certificate-errors --trace-leaks ./server/cloud.ts", + "db:drop": "DEBUG_COLORS=1 deno run -A ./server/db-drop.ts", + "db:tools": "DEBUG_COLORS=1 deno run -A ./server/db-tools.ts", + "keygen": "DEBUG_COLORS=1 deno run ./server/keygen.ts", + "bw": "DENO_KV_PATH=:memory: DEBUG_COLORS=1 deno run -A --check --watch benchmarks/bench.ts", + "bench": "DENO_KV_PATH=:memory: DEBUG_COLORS=1 deno run -A benchmarks/bench.ts" + }, + "imports": { + "$git": "npm:isomorphic-git", + "@/": "./", + "@deno/kv-oauth": "jsr:@deno/kv-oauth", + "@io/": "./io/", + "@kitsonk/kv-toolbox": "jsr:@kitsonk/kv-toolbox", + "@noble/hashes": "npm:@noble/hashes", + "@noble/secp256k1": "npm:@noble/secp256k1", + "@scure/base": "npm:@scure/base", + "@sindresorhus/is": "npm:@sindresorhus/is", + "@std/assert": "jsr:@std/assert", + "@std/async": "jsr:@std/async", + "@std/dotenv": "jsr:@std/dotenv", + "@std/expect": "jsr:@std/expect", + "@std/path": "jsr:@std/path@^1.0.6", + "@types/benchmark": "npm:@types/benchmark", + "@utils": "./utils.ts", + "benchmark": "npm:benchmark", + "chalk": "npm:chalk", + "debug": "npm:debug", + "diff": "npm:diff", + "diff3": "npm:diff3", + "eventsource-parser/stream": 
"npm:eventsource-parser/stream", + "fast-deep-equal/es6": "npm:fast-deep-equal/es6/index.js", + "gray-matter": "npm:gray-matter", + "hono": "jsr:@hono/hono", + "imagescript": "https://deno.land/x/imagescript@1.3.0/mod.ts", + "it-pushable": "npm:it-pushable", + "lodash.get": "npm:lodash.get@^4.4.2", + "lodash.merge": "npm:lodash.merge", + "lodash.set": "npm:lodash.set@^4.3.2", + "micromatch": "npm:micromatch", + "multiformats": "npm:multiformats", + "openai": "npm:openai", + "serialize-error": "npm:serialize-error", + "ulid": "npm:ulid", + "youtube-caption-extractor": "npm:youtube-caption-extractor", + "zod": "npm:zod", + "zod-to-json-schema": "npm:zod-to-json-schema" + }, + "exclude": ["cov/"], + "lint": { + "rules": { + "tags": ["recommended"], + "include": ["no-boolean-literal-for-arguments"] + } + }, + "scopes": {}, + "fmt": { + "include": ["**/*.ts", "**/*.tsx"], + "exclude": ["coverage", "cov", "node_modules", "HAL", "tmp"], + "semiColons": false, + "singleQuote": true, + "proseWrap": "always", + "indentWidth": 2, + "useTabs": false + }, + "unstable": ["kv"], + "exports": "./main.ts" +} diff --git a/_import-artifact/engine.test.ts b/_import-artifact/engine.test.ts new file mode 100644 index 000000000000..9d169b61cc5e --- /dev/null +++ b/_import-artifact/engine.test.ts @@ -0,0 +1,46 @@ +import { Backchat } from './api/client-backchat.ts' +import guts from './guts/guts.ts' +import { expect, log } from '@utils' +import { cradleMaker } from '@/cradle-maker.ts' +import * as backchat from './isolates/backchat.ts' +import { getBaseName } from '@/constants.ts' + +Deno.test('cradle', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { engine, privateKey, backchat } = cradle + await t.step('basic', async () => { + const result = await backchat.ping({ data: 'hello' }) + expect(result).toBe('hello') + + await backchat.rm({ repo: 'dreamcatcher-tech/HAL' }) + log('backchat', backchat.pid) + const clone = await backchat.clone({ repo: 
'dreamcatcher-tech/HAL' }) + log('clone result', clone) + expect(clone.pid).toBeDefined() + expect(clone.pid.account).toBe('dreamcatcher-tech') + expect(typeof clone.head).toBe('string') + }) + let next: Backchat + await t.step('second backchat', async () => { + next = await Backchat.upsert(engine, privateKey) + expect(next.pid).not.toEqual(backchat.pid) + expect(next.id).not.toEqual(backchat.id) + }) + await t.step('new thread', async () => { + const firstBase = await backchat.threadPID() + const secondBase = await next.threadPID() + expect(firstBase).not.toEqual(secondBase) + + const { newThread } = await next.actions('backchat') + const threadId = await newThread({}) + const thirdBase = await next.threadPID() + expect(getBaseName(thirdBase)).toEqual(threadId) + expect(secondBase).not.toEqual(thirdBase) + }) + + // test intercepting backchat text +}) + +guts(cradleMaker) + +// TODO confirm cannot delete the system chain diff --git a/_import-artifact/engine.ts b/_import-artifact/engine.ts new file mode 100644 index 000000000000..6b3dc7ecc7ee --- /dev/null +++ b/_import-artifact/engine.ts @@ -0,0 +1,344 @@ +import { transcribe } from './isolates/ai-completions.ts' +import Compartment from './io/compartment.ts' +import '@std/dotenv/load' +import { + addBranches, + backchatIdRegex, + C, + EngineInterface, + freezePid, + generateActorId, + generateBackchatId, + HAL, + JsonValue, + PID, + Pierce, + print, + Proctype, + Provisioner, +} from './constants.ts' +import * as actor from './api/isolates/actor.ts' +import IA from './isolate-api.ts' +import { assert, Debug, posix } from '@utils' +import FS from '@/git/fs.ts' +import * as artifact from '@/isolates/artifact.ts' +import { ulid } from 'ulid' +import { Crypto } from './api/crypto.ts' +import { PierceWatcher } from './api/watcher.ts' +import * as actors from './isolates/actors.ts' +import { Backchat } from './api/client-backchat.ts' +import { tryActorId } from '@/isolates/machines.ts' +const log = Debug('AI:engine') 
+type Seed = Deno.KvEntry[] + +export class Engine implements EngineInterface { + #superuserKey: string + #compartment: Compartment + #api: IA + #pierce: artifact.Api['pierce'] + #homeAddress: PID | undefined + #githubAddress: PID | undefined + #abort = new AbortController() + + private constructor( + compartment: Compartment, + api: IA, + superuserKey: string, + ) { + this.#compartment = compartment + this.#api = api + const functions = compartment.functions(api) + this.#pierce = functions.pierce + this.#superuserKey = superuserKey + } + static async boot(superuserKey: string, aesKey: string, seed?: Seed) { + const compartment = await Compartment.create('artifact') + const api = IA.createContext() + api.context = { aesKey, seed } + await compartment.mount(api) + const engine = new Engine(compartment, api, superuserKey) + return engine + } + static async provision( + superuserKey: string, + aesKey: string, + init?: Provisioner, + seed?: Seed, + ) { + // TODO collapse this to be same as server, so never gets called + const engine = await Engine.boot(superuserKey, aesKey, seed) + await engine.ensureHomeAddress(init) + return engine + } + superUser(): Backchat { + const crypto = Crypto.load(this.#superuserKey) + return Backchat.superuser(this, crypto) + } + get #isDropping() { + const toDelete = Deno.env.get('DROP_HOME') || '' + const isDropping = toDelete.trim() === this.homeAddress.repoId + isDropping && log('isDropping', isDropping) + return isDropping + } + get context() { + return this.#api.context + } + get homeAddress() { + if (!this.#homeAddress) { + throw new Error('home not provisioned') + } + return this.#homeAddress + } + get abortSignal() { + return this.#abort.signal + } + get isProvisioned() { + return !!this.#homeAddress && !!this.#githubAddress + } + async upsertBackchat(machineId: string, resume?: string) { + // TODO handshake to prove the machineId is valid + Crypto.assert(machineId) + assert(!resume || backchatIdRegex.test(resume), 'invalid 
resume') + const { db } = artifact.sanitizeContext(this.#api) + + const machines = addBranches(this.homeAddress, 'machines') + const machineFs = await FS.openHead(machines, db) + let actor: PID + + const actorId = await tryActorId(machineId, machineFs) + if (actorId) { + actor = addBranches(this.homeAddress, actorId) + } else { + return this.#createActor(machineId) + } + if (resume) { + const resumedBackchat = addBranches(actor, resume) + if (await db.readHead(resumedBackchat)) { + return resumedBackchat + } + } + return this.#createBackchat(actor) + } + async #createActor(machineId: string) { + Crypto.assert(machineId) + const actorId = generateActorId() + const actor = addBranches(this.homeAddress, actorId) + const backchatId = generateBackchatId() + const backchat = addBranches(actor, backchatId) + + const target = this.homeAddress + const su = this.superUser() + const actions = await su.actions('actors', { target }) + await actions.createActor({ actorId, machineId, backchatId }) + + return backchat + } + async #createBackchat(target: PID) { + // TODO assert is actor PID + // TODO this should really be done by the actor + // so the request should include a pierce to create a new one in case the + // resume doesn't work, since SU takes time + const su = this.superUser() + const { backchat } = await su.actions('actor', { + target, + }) + const backchatId = generateBackchatId() + const pid = await backchat({ backchatId }) + return freezePid(pid) + } + async ensureHomeAddress(init?: Provisioner) { + if (this.#homeAddress) { + return + } + + log('ensureHomeAddress querying db') + const { db } = artifact.sanitizeContext(this.#api) + if (await db.hasHomeAddress()) { + this.#homeAddress = await db.awaitHomeAddress() + log('homeAddress found in db', print(this.#homeAddress)) + } + + if (!this.#homeAddress || this.#isDropping) { + const lockId = await db.lockDbProvisioning() + if (lockId) { + log('dropping db') + await this.#dropDB() + log('db drop complete') + + 
log('initializing homeAddress') + this.#homeAddress = await this.#initHome() + await db.setHomeAddress(this.#homeAddress) + log('homeAddress initialized to:', print(this.#homeAddress)) + + await this.#provision(init) + await db.unlockDbProvisioning(lockId) + log('unlocked db', lockId) + } + } + // importantly, this blocks only web requests, not queue processing + await db.awaitDbProvisioning() + this.#homeAddress = await db.awaitHomeAddress() + log('db unlocked - home address:', print(this.#homeAddress)) + } + async stop() { + this.#abort.abort() + await this.#compartment.unmount(this.#api) + // attempt to let the subtle digest call clean up + await new Promise((resolve) => setTimeout(() => resolve())) + } + async #initHome() { + // queue processing cannot begin without a home repo + log('initHome') + const { db } = artifact.sanitizeContext(this.#api) + const repo = `${HAL.account}/${HAL.repository}` + const fs = await FS.clone(repo, db) + log('initialized home', print(fs.pid)) + return fs.pid + } + async #provision(init?: Provisioner) { + const abort = new AbortController() + + const watcher = PierceWatcher.create(abort.signal, this, this.homeAddress) + watcher.watchPierces() + const { machineId: superuser } = Crypto.load(this.#superuserKey) + const pierce: Pierce = { + target: this.homeAddress, + ulid: ulid(), + isolate: 'actors', + functionName: '@@install', + params: { superuser }, + proctype: Proctype.enum.SERIAL, + } + const promise = watcher.watch(pierce.ulid) + + // notably, this is the only non terminal pierce in the whole system + await this.pierce(pierce) + // TODO sign the pierce since superuser is already present + log('pierced', print(this.homeAddress)) + await promise + abort.abort() // TODO make this a method on the watcher + const su = this.superUser() + log('superuser is', print(su.pid)) + + if (!init) { + log('no init function - returning') + return + } + + log('provisioning') + await init(su) + log('provisioned') + } + ping(data?: JsonValue): 
Promise { + log('ping', data) + return Promise.resolve(data) + // TODO return some info about the deployment + // version, deployment location, etc + // if you want to ping in a chain, use an isolate + } + async apiSchema(isolate: string) { + const compartment = await Compartment.create(isolate) + return compartment.api + } + async transcribe(audio: File) { + assert(audio instanceof File, 'audio must be a File') + const text = await transcribe(audio) + return { text } + } + async pierce(pierce: Pierce) { + await this.#pierce({ pierce }) + } + watch(pid: PID, path?: string, after?: string, signal?: AbortSignal) { + // TODO read should take a triad and should not require strict sequence + freezePid(pid) + assert(!path || !posix.isAbsolute(path), `path must be relative: ${path}`) + + const db = this.#api.context.db + assert(db, 'db not found') + const abort = new AbortController() + signal?.addEventListener('abort', () => abort.abort()) + this.#abort.signal.addEventListener('abort', () => abort.abort()) + return db.watchSplices(pid, path, after, abort.signal) + } + async splice( + target: PID, + opts: { commit?: string; path?: string; count?: number } = {}, + ) { + let { commit, path, count = 1 } = opts + log('splice', print(target), commit, path, count) + const db = this.#api.context.db + assert(db, 'db not found') + + if (!commit) { + commit = await db.readHead(target) + } + if (!commit) { + throw new Error('commit not found: ' + print(target)) + } + const splices = [] + const commits = [commit] + while (splices.length < count) { + const commit = commits.shift() + if (!commit) { + break + } + const splice = await db.getSplice(target, commit, path) + splices.push(splice) + commits.push(...splice.commit.parent) + } + + return splices + } + async read(path: string, pid: PID, commit?: string) { + const fs = await this.#openFs(pid, commit) + log('read', path, print(pid)) + return fs.read(path) + } + async readTree(path: string, pid: PID, commit?: string) { + const fs = 
await this.#openFs(pid, commit) + log('readTree', path, print(pid)) + return fs.readTree(path) + } + async readJSON(path: string, pid: PID, commit?: string) { + const fs = await this.#openFs(pid, commit) + log('readJSON', path, print(pid)) + return fs.readJSON(path) + } + async readBinary(path: string, pid: PID, commit?: string) { + const fs = await this.#openFs(pid, commit) + log('readBinary', path, print(pid)) + return fs.readBinary(path) + } + async #openFs(pid: PID, commit?: string) { + freezePid(pid) + + const db = this.#api.context.db + assert(db, 'db not found') + let fs + if (commit) { + fs = FS.open(pid, commit, db) + } else { + fs = await FS.openHead(pid, db) + } + return fs + } + async exists(path: string, pid: PID): Promise { + // TODO convert to triad + freezePid(pid) + + const db = this.#api.context.db + assert(db, 'db not found') + const fs = await FS.openHead(pid, db) + log('exists', path, print(pid)) + return fs.exists(path) + } + async #dropDB() { + log('dropping homeAddress', print(this.#homeAddress)) + const { db } = artifact.sanitizeContext(this.#api) + await db.drop() + } + dump() { + const { db } = artifact.sanitizeContext(this.#api) + return db.dump() + } +} diff --git a/_import-artifact/exe/accumulator.ts b/_import-artifact/exe/accumulator.ts new file mode 100644 index 000000000000..4058f7637e63 --- /dev/null +++ b/_import-artifact/exe/accumulator.ts @@ -0,0 +1,155 @@ +import { + META_SYMBOL, + PID, + PromisedIsolatePromise, + SettledIsolatePromise, + UnsequencedRequest, +} from '@/constants.ts' +import { IsolatePromise } from '@/constants.ts' +import { assert, deserializeError, equal, expect } from '@utils' +import FS from '@/git/fs.ts' + +export default class Accumulator { + #index = 0 + #buffer: IsolatePromise[] + #fs: FS + #highestFs: FS + #new: PromisedIsolatePromise[] = [] + #isActive = false + #trigger: (() => void) | undefined + private constructor(highestFs: FS, buffer: IsolatePromise[]) { + this.#buffer = buffer + this.#highestFs 
= highestFs + this.#fs = highestFs + const [first] = buffer + if (first) { + assert('commit' in first, 'first accumulation must have a commit') + this.#fs = highestFs.tick(first.commit) + } + } + static create(highestFs: FS, buffer: IsolatePromise[] = []) { + const acc = new Accumulator(highestFs, buffer) + return acc + } + get accumulations() { + return [...this.#new] + } + get fs() { + return this.#fs + } + push(promised: PromisedIsolatePromise) { + assert(this.isActive, 'Activity is denied') + assert(typeof this.#trigger === 'function', 'Trigger is not set') + this.#trigger() + this.#new.push(promised) + this.#buffer.push(promised) + } + #tickFs() { + const next = this.#buffer[this.#index] + if (next && 'commit' in next) { + this.#fs = this.#fs.tick(next.commit) + } else if (this.fs.oid !== this.#highestFs.oid) { + // we are at the final layer, so use the latest fs + this.#fs = this.#fs.tick(this.#highestFs.oid) + } + } + recover(request: UnsequencedRequest) { + assert(this.isActive, 'Activity is denied') + const index = this.#index++ + if (this.#buffer[index]) { + const recovered = this.#buffer[index] + if (!equal(recovered.request, request)) { + expect(recovered.request, 'Requests are not equal').toEqual(request) + } + if ('outcome' in recovered) { + this.#tickFs() + } + return recovered + } + } + isParent(parent: string) { + assert(this.isActive, 'Activity is denied') + for (const promise of this.#buffer) { + if ('parent' in promise) { + if (promise.parent === parent) { + return true + } + } + } + return false + } + activate(symbol: symbol) { + assert(!this.isActive, 'Activity is already active') + assert(!this.#trigger, 'Trigger is already set') + this.#isActive = true + return new Promise((resolve) => { + this.#trigger = () => resolve(symbol) + }) + } + deactivate() { + assert(this.isActive, 'Activity is not active') + this.#isActive = false + this.#trigger = undefined + } + get isActive() { + return this.#isActive + } + /** + * Used so that an execution 
can be paused, then receive replies from + * accumulated actions, then continue without restarting the execution. Makes + * it easier to debug these functions, but also can be faster to execute. + * This is a nice to have and the operation is equally capable of starting + * again, if we find ourselves replaying the operation with no existing cache. + * + * As new layers of the accumulation process occur, the filesystem object + * referenced by the isolate-api object will tick forwards. + * + * @param from The newer accumulator that should be copied in to the old one + */ + absorb(from: Accumulator) { + assert(!this.isActive, '"this" is already active') + assert(!from.isActive, '"from" is already active') + if (this === from) { + return + } + assert(this.#buffer.length <= from.#buffer.length, '"this" must be shorter') + this.#highestFs = from.#highestFs + + let index = 0 + for (const source of from.#buffer) { + const sink = this.#buffer[index++] + if (!sink) { + this.#buffer.push(source) + continue + } + if (!equal(source.request, sink.request)) { + expect(source.request).toEqual(sink.request) + } + if ('outcome' in sink) { + assert('outcome' in source, 'source has no outcome') + assert(equal(source.outcome, sink.outcome), 'outcomes are not equal') + assert(equal(source.commit, sink.commit), 'commits are not equal') + } else if ('outcome' in source) { + const settledSink = sink as SettledIsolatePromise + settledSink.outcome = source.outcome + settledSink.commit = source.commit + settledSink.parent = source.parent + } + if ('outcome' in sink && 'promise' in sink) { + sink.promise[META_SYMBOL] = { parent: sink.parent } + if (sink.outcome.error) { + sink.reject(deserializeError(sink.outcome.error)) + } else { + sink.resolve(sink.outcome.result) + } + } + } + this.#tickFs() + // TODO assert the #new matched the incoming updates exactly + this.#new = [] + } + isPidExists(pid: PID) { + // TODO log this action in the buffer and make it replayable + return 
this.fs.isPidExists(pid) + } +} diff --git a/_import-artifact/exe/exe.test.ts b/_import-artifact/exe/exe.test.ts new file mode 100644 index 000000000000..6f6530e75194 --- /dev/null +++ b/_import-artifact/exe/exe.test.ts @@ -0,0 +1,248 @@ +import IOChannel from '../io/io-channel.ts' +import FS from '../git/fs.ts' +import Executor from './exe.ts' +import { + C, + isPierceRequest, + PartialPID, + Proctype, + SolidRequest, +} from '@/constants.ts' +import { assert, expect, log } from '@utils' +import DB from '@/db.ts' +import { UnsequencedRequest } from '@/constants.ts' +import { Engine } from '@/engine.ts' +import { Api } from '@/isolates/io-fixture.ts' +import { fixedRandomness } from '@/api/randomness.ts' +import { cradleMaker } from '@/cradle-maker.ts' + +type PartialRequest = Omit + +const repoId = `rep_${fixedRandomness('exe-test')}` +const partialPid: PartialPID = { + account: 'exe', + repository: 'test', + branches: ['main'], +} +const source = { ...partialPid, repoId, account: 'higher' } +const partialRequest: PartialRequest = { + isolate: 'io-fixture', + functionName: 'local', + params: {}, + proctype: Proctype.enum.SERIAL, + source, + sequence: 0, +} +const mocks = async (initialRequest: PartialRequest) => { + const db = await DB.create(DB.generateAesKey()) + let fs = await FS.init(partialPid, db) + let io = await IOChannel.load(fs) + const request = { ...initialRequest, target: fs.pid } + io.addRequest(request) + io.setExecution() + io.save() + const { next } = await fs.writeCommitObject('mock exe') + fs = next + io = await IOChannel.load(fs) + const stop = () => db.stop() + const exe = Executor.createCacheContext() + const context: C = { db, exe } + return { request, context, exe, io, fs, db, stop } +} +Deno.test('simple', async (t) => { + const { request, context, exe, fs, stop } = await mocks(partialRequest) + await t.step('no accumulations', async () => { + const result = await exe.execute(request, fs.oid, context) + assert('reply' in result) + const { 
reply, fs: settledFs } = result + expect(reply.target).toEqual(fs.pid) + expect(reply.outcome).toEqual({ result: 'local reply' }) + expect(settledFs).toEqual(fs) + expect(settledFs.isChanged).toBeFalsy() + }) + stop() +}) +Deno.test('writes', async (t) => { + const write = { + ...partialRequest, + functionName: 'write', + params: { path: 'test.txt', content: 'hello' }, + } + const { request, context, exe, fs, stop } = await mocks(write) + await t.step('single file', async () => { + const result = await exe.execute(request, fs.oid, context) + assert('reply' in result) + const { reply, fs: settledFs } = result + expect(reply.target).toEqual(fs.pid) + expect(reply.outcome.result).toBeUndefined() + expect(settledFs).toEqual(fs) + expect(settledFs.upserts).toEqual(['test.txt']) + expect(settledFs.deletes).toHaveLength(0) + }) + stop() + // write, delete, write + // write, delete, + // delete existing file + // multiple writes to the same file + + // check the broadcasting is working + // throttle the broadcast updates - thrash to check it works +}) + +Deno.test('loopback', async () => { + const compound = { + ...partialRequest, + functionName: 'compound', + params: { target: source }, + } + const { request, context, exe, fs, stop } = await mocks(compound) + const result = await exe.execute(request, fs.oid, context) + expect('pending' in result).toBeTruthy() + stop() +}) + +Deno.test('compound', async (t) => { + const target = { + repoId: `rep_${fixedRandomness('compound')}`, + account: 'exe', + repository: 'other', + branches: ['other'], + } + const compound = { + isolate: 'io-fixture', + proctype: Proctype.enum.SERIAL, + source, + sequence: 0, + functionName: 'compound', + params: { target }, + } + const { request, context, exe, io, fs, stop } = await mocks(compound) + let unsequenced: UnsequencedRequest + await t.step('half done', async () => { + const result = await exe.execute(request, fs.oid, context) + expect(result).toHaveProperty('pending') + assert('pending' in 
result) + const { requests } = result.pending + expect(requests).toHaveLength(1) + unsequenced = requests[0] + log('internalRequest', unsequenced) + expect(unsequenced.target).toEqual(target) + }) + await t.step('reply using function cache', async () => { + const sequenced = io.addPending(0, fs.oid, [unsequenced]) + expect(sequenced).toHaveLength(1) + const target = sequenced[0].source + const reply = { + target, + source: target, + outcome: { result: 'compound reply' }, + sequence: sequenced[0].sequence, + commit: '9e77f98333897ce63bf993280994d5e96c1679ff', + } + + const savedRequest = io.reply(reply) + assert(!isPierceRequest(savedRequest)) + expect(savedRequest).toEqual(sequenced[0]) + io.setExecution() + io.save() + const { next } = await fs.writeCommitObject('function cache') + + const result = await exe.execute(request, next.oid, context) + expect('reply' in result).toBeTruthy() + }) + await t.step('reply from replay', async () => { + assert(request) + const io = await IOChannel.load(fs) + const sequenced = io.addPending(0, fs.oid, [request]) + expect(sequenced).toHaveLength(1) + const target = fs.pid + const reply = { + target, + source: target, + commit: '0088107ee2fe3d8350d64b8b0098081a3db08adc', + outcome: { result: 'compound reply' }, + sequence: sequenced[0].sequence, + } + io.reply(reply) + io.setExecution() + io.save() + const { next } = await fs.writeCommitObject('function replay') + + const c = { ...context, exe: Executor.createCacheContext() } + const result = await c.exe.execute(request, next.oid, c) + expect('reply' in result).toBeTruthy() + }) + stop() + + // multiple outstanding requests + // test that it will not get rerun unless all outstanding are returned + // test rejection + // dangling requests are awaited until function resolves + // branch request to self is ok + // test multiple cycles thru requests and replies + // test making different request between two invocations +}) + +// TODO convert this to use the cradle maker + +const 
provision = async (t: Deno.TestContext, withExeCache: boolean) => { + const cradle = await cradleMaker(t, import.meta.url) + if (!withExeCache) { + assert(cradle.engine instanceof Engine) + assert(cradle.engine.context.exe, 'exe should be defined') + cradle.engine.context.exe.disableFunctionCache() + } + return cradle +} + +for (const withExeCache of [true, false]) { + Deno.test(`commit spanning (cache: ${withExeCache}`, async (t) => { + await t.step(`function cache`, async () => { + await using cradle = await provision(t, withExeCache) + const { backchat } = cradle + + const { fileAccumulation } = await backchat.actions('io-fixture') + await fileAccumulation({ path: 'test.txt', content: 'hello', count: 3 }) + + let first + for await (const splice of backchat.watch(backchat.pid, 'test.txt')) { + first = splice + break + } + assert(first) + const file = first.changes['test.txt'].patch + assert(file) + log(file) + expect(file.split('\n')).toHaveLength(7) + }) + }) + + Deno.test(`looping accumulation (cache: ${withExeCache}`, async (t) => { + await t.step(`function cache ${withExeCache}`, async () => { + await using cradle = await provision(t, withExeCache) + const { backchat } = cradle + + const { pid } = backchat + const { loopAccumulation } = await backchat.actions( + 'io-fixture', + { target: pid }, + ) + await loopAccumulation({ path: 'test.txt', content: 'hello', count: 3 }) + + let first + for await (const splice of backchat.watch(pid, 'test.txt')) { + first = splice + break + } + assert(first) + const file = first.changes['test.txt'].patch + assert(file) + log(file) + expect(file.split('\n')).toHaveLength(9) + }) + }) +} + +// verify that pierce cannot interrupt a running in band accumulation +// test repeat calling should not corrupt the cache, and should return the same, +// even if the commit was several accumulations ago diff --git a/_import-artifact/exe/exe.ts b/_import-artifact/exe/exe.ts new file mode 100644 index 000000000000..cc10826c04ee --- 
/dev/null +++ b/_import-artifact/exe/exe.ts @@ -0,0 +1,138 @@ +import IOChannel from '../io/io-channel.ts' +import { getExeId } from '@/keys.ts' +import { + C, + ExeResult, + JsonValue, + Outcome, + SolidReply, + SolidRequest, +} from '@/constants.ts' +import IA from '../isolate-api.ts' +import Compartment from '../io/compartment.ts' +import { assert, Debug, equal, serializeError } from '@utils' +import Accumulator from '@/exe/accumulator.ts' +import FS from '@/git/fs.ts' +const log = Debug('AI:exe') + +export default class Executor { + #functions = new Map() + #functionCacheDisabled = false + static createCacheContext() { + return new Executor() + } + disableFunctionCache() { + this.#functionCacheDisabled = true + } + /** + * @param req + * @param commit The commit that caused this invocation, which might be the + * commit of the request, or the commit of the most recent accumulation that + * allowed this execution to continue. + * @param c + * @returns + */ + async execute(req: SolidRequest, commit: string, c: C): Promise { + const fs = FS.open(req.target, commit, c.db) + assert(equal(fs.pid, req.target), 'target is not self') + const io = await IOChannel.read(fs) + assert(io, 'io not found') + assert(io.isExecution(req), 'request is not callable') + log('request %o %o', req.isolate, req.functionName, commit) + + const accumulator = io.getAccumulator(fs) + + // if this is a side effect, we need to get the side effect lock + // then this check needs to be added into everything that the api does + // we need to start watching for changes to the lock value + + const exeId: string = getExeId(req) + if (this.#functionCacheDisabled || !this.#functions.has(exeId)) { + log('creating execution %o', exeId) + const opts = { isEffect: true, isEffectRecovered: false } + // TODO read side effect config from io.json + const { runnable, commit } = io.getExecution() + assert(runnable, 'origin not found') + const isolateApi = IA.create(accumulator, runnable, commit, opts) + if 
(req.isolate === 'system') { + log('system isolate') + isolateApi.context = c + } + const compartment = await Compartment.create(req.isolate) + const functions = compartment.functions(isolateApi) + const execution = { + function: Promise.resolve().then(() => { + if (!(req.functionName in functions)) { + const msg = `isolate: ${req.isolate} function: ${req.functionName}` + throw new Error('function not found: ' + msg) + } + return functions[req.functionName](req.params) + }).then((result) => { + const outcome: Outcome = {} + if (result !== undefined) { + outcome.result = result as JsonValue // sorry 🤷 + log('self result: %o', outcome.result) + } + return outcome + }).catch((error) => { + // TODO cancel all outstanding requests + // and transmit them to the chains that are running them + log('error', error.message) + const outcome: Outcome = { error: serializeError(error) } + return outcome + }), + accumulator, + api: isolateApi, + commits: [fs.oid], + } + this.#functions.set(exeId, execution) + } else { + const execution = this.#functions.get(exeId) + assert(execution, 'execution not found') + if (execution.commits.includes(fs.oid)) { + // TODO exe should be idempotent ? 
+ throw new Error('request already executed for commit: ' + fs.oid) + } + } + log('running execution %o', exeId) + const execution = this.#functions.get(exeId) + assert(execution, 'execution not found') + + execution.commits.push(fs.oid) + execution.accumulator.absorb(accumulator) + + const trigger = Symbol('🏎️') + const accumulatorPromise = execution.accumulator.activate(trigger) + const outcome = await Promise.race([execution.function, accumulatorPromise]) + execution.accumulator.deactivate() + + let result: ExeResult + const sequence = io.getSequence(req) + if (outcome === trigger) { + const { accumulations } = execution.accumulator + assert(accumulations.length > 0, 'no accumulations') + log('accumulator triggered first', accumulations) + const requests = accumulations.map((a) => a.request) + result = { + fs: execution.accumulator.fs, + pending: { commit: fs.oid, requests, sequence }, + } + } else { + assert(typeof outcome === 'object') + log('exe complete %o', exeId) + this.#functions.delete(exeId) + const target = fs.pid + const reply: SolidReply = { target, sequence, outcome } + + result = { fs: execution.accumulator.fs, reply } + } + return result + } +} + +type Execution = { + function: Promise + accumulator: Accumulator + api: IA + commits: string[] +} diff --git a/_import-artifact/git/branch.test.ts b/_import-artifact/git/branch.test.ts new file mode 100644 index 000000000000..996b16860bf8 --- /dev/null +++ b/_import-artifact/git/branch.test.ts @@ -0,0 +1,120 @@ +import { assert, expect, log, merge } from '@utils' +import { solidify } from '@/git/solidify.ts' +import { branch } from '@/git/branch.ts' +import { + IoStruct, + isMergeReply, + MergeReply, + PartialPID, + PID, + Pierce, + Proctype, + UnsequencedRequest, +} from '@/constants.ts' +import FS from '@/git/fs.ts' +import DB from '@/db.ts' + +Deno.test('pierce branch', async (t) => { + const partial: PartialPID = { + account: 'git', + repository: 'test', + branches: ['main'], + } + const db = await 
DB.create(DB.generateAesKey()) + const baseFs = await FS.init(partial, db) + const target = baseFs.pid + const mockRequest: UnsequencedRequest = { + target, + isolate: 'mock', + functionName: 'mock', + params: {}, + proctype: Proctype.enum.SERIAL, + } + const branchPierce = (ulid: string): Pierce => ({ + target, + ulid, + isolate: 'test-isolate', + functionName: 'test', + params: { request: mockRequest }, + proctype: Proctype.enum.BRANCH, + }) + const reply: MergeReply = { + target, + sequence: 0, + outcome: { result: 'test-result' }, + commit: '4b825dc642cb6eb9a060e54bf8d69288fbee4904', + source: target, + } + const pierce = branchPierce('pierce') + let head: string + let parentHead: string + let branches: number[] + let branchPid: PID + + await t.step('parent', async () => { + const solids = await solidify(baseFs, [pierce]) + head = solids.oid + parentHead = head + branches = solids.branches + expect(branches).toEqual([0]) + expect(solids.exe).toBeUndefined() + const next = FS.open(baseFs.pid, head, db) + expect(next.oid).toEqual(solids.oid) + const io = await next.readJSON('.io.json') + expect(io.sequence).toBe(1) + expect(io.requests[0]).toEqual(pierce) + expect(io.requests[0].proctype).toEqual(Proctype.enum.BRANCH) + }) + await t.step('child', async () => { + const parentFs = FS.open(baseFs.pid, head, db) + expect(parentFs.oid).toEqual(head) + expect(branches).toEqual([0]) + const sequence = branches[0] + const branched = await branch(parentFs, sequence) + expect(branched.origin.source).toEqual(pierce.target) + head = branched.head + branchPid = branched.origin.target + expect(branchPid).toEqual(branched.pid) + }) + let mergeReply: MergeReply + await t.step('child reply', async () => { + const branchReply = merge({}, reply, { target: branchPid }) + const branchFs = FS.open(branchPid, head, db) + expect(branchFs.oid).toEqual(head) + const solids = await solidify(branchFs, [branchReply]) + const { poolables } = solids + log('poolables', poolables[0]) + 
expect(poolables.length).toBe(1) + assert(isMergeReply(poolables[0])) + mergeReply = poolables[0] + expect(mergeReply.outcome).toEqual(reply.outcome) + expect(mergeReply.target).toEqual(target) + + const { commit, sequence, source } = mergeReply + const isReply = true + const recovered = await db.readPoolable(mergeReply.target, { + commit, + sequence, + source, + isReply, + }) + expect(recovered).toEqual(mergeReply) + }) + await t.step('child merge to parent', async () => { + const parentFs = FS.open(baseFs.pid, parentHead, db) + expect(parentFs.oid).not.toEqual(head) + + const { poolables, oid } = await solidify(parentFs, [mergeReply]) + expect(poolables).toHaveLength(0) + const next = FS.open(parentFs.pid, oid, db) + const io = await next.readJSON('.io.json') + const outcome = io.replies[0] + expect(outcome).toEqual(mergeReply.outcome) + const lastCommit = await next.getCommit() + expect(lastCommit.parent).toHaveLength(2) + }) + db.stop() +}) +// TODO delete branches +// TODO custom names honoured +// TODO error if name collision diff --git a/_import-artifact/git/branch.ts b/_import-artifact/git/branch.ts new file mode 100644 index 000000000000..4802d28a0826 --- /dev/null +++ b/_import-artifact/git/branch.ts @@ -0,0 +1,55 @@ +import { assert, Debug } from '@utils' +import { Branched, Proctype } from '@/constants.ts' +import { RemoteRequest } from '@/constants.ts' +import IOChannel from '../io/io-channel.ts' +import { solidify } from './solidify.ts' +import FS from '@/git/fs.ts' + +const log = Debug('AI:git:branch') + +/** + * Given the fs from the parent branch, create a new branch from the + * given commit. 
+ * @param fs the parent fs + * @param sequence the request sequence number in the parent to branch with + * @param atomic the atomic transaction object + */ +export const branch = async (fs: FS, sequence: number) => { + assert(!fs.isChanged, 'Cannot branch from a changed fs') + assert(sequence >= 0, 'sequence must be a whole number') + log('branch', sequence, fs.oid) + const io = await IOChannel.load(fs) + const pid = io.getBranchPid(sequence) + + const { isolate, functionName, params, deletes } = io.getRequest(sequence) + + const origin: RemoteRequest = { + isolate, + functionName, + params, + target: pid, + source: fs.pid, + sequence, + proctype: Proctype.enum.SERIAL, + commit: fs.oid, + } + log('origin', origin) + const branch = fs.branch(pid) + IOChannel.blank(branch) + if (deletes) { + deletes.forEach((d) => branch.delete(d)) + } + // TODO handle branch exists with an error reply + + const solids = await solidify(branch, [origin]) + assert(solids.exe, 'must have an exe') + assert(solids.branches.length === 0, 'must have no branches') + assert(solids.poolables.length === 0, 'must have no poolables') + assert(solids.deletes.length === 0, 'must have no deletes') + const branched: Branched = { + origin: solids.exe.request, + head: solids.oid, + pid, + } + return branched +} diff --git a/_import-artifact/git/fs.test.ts b/_import-artifact/git/fs.test.ts new file mode 100644 index 000000000000..3ecb0831d9bf --- /dev/null +++ b/_import-artifact/git/fs.test.ts @@ -0,0 +1,233 @@ +import * as utils from '@utils' +import DB from '@/db.ts' +import FS from './fs.ts' +import { addBranches, addPeer, IO_PATH, partialFromRepo } from '@/constants.ts' +const { expect } = utils +Deno.test('git/init', async (t) => { + const db = await DB.create(DB.generateAesKey()) + let fs: FS + await t.step('init', async () => { + const repo = 'account/repo' + const pid = partialFromRepo(repo) + fs = await FS.init(pid, db) + await expect(fs.read('hello.txt')).rejects.toThrow( + 'Could not find 
hello.txt', + ) + expect(await db.readHead(fs.pid)).toBe(fs.oid) + }) + await t.step('git paths', async () => { + const git = 'git paths are forbidden: ' + const json = 'path must be *.json' + const paths = [ + '.git', + '.git/something', + '.git/objects/something', + '.git/refs', + '.git/refs/heads', + '.git/refs/heads/main', + ] + const trailing = [...paths, ...paths.map((path) => path + '/')] + const forwards = [ + ...trailing, + ...trailing.map((path) => '/' + path), + ] + for (const path of forwards) { + expect(() => fs.write(path, ''), path).toThrow(git) + expect(() => fs.write(path, 'data'), path).toThrow(git) + expect(() => fs.writeJSON(path, ''), path).toThrow(json) + expect(() => fs.writeJSON(path, 'data'), path).toThrow(json) + expect(() => fs.delete(path), path).toThrow(git) + await expect(fs.read(path), path).rejects.toThrow(git) + await expect(fs.readBinary(path), path).rejects.toThrow(git) + await expect(fs.readJSON(path), path).rejects.toThrow(json) + await expect(fs.ls(path), path).rejects.toThrow(git) + await expect(fs.exists(path), path).rejects.toThrow(git) + if (path) { + expect(() => fs.logs(path), path).toThrow(git) + } + } + }) + await t.step('complicated paths', async () => { + const paths = ['', '.', '/', '/something', '/something/deep', '//something'] + for (const path of paths) { + if (['', '.', '/'].includes(path)) { + expect(() => fs.write(path, ''), path).toThrow() + expect(() => fs.write(path, 'data'), path).toThrow() + expect(() => fs.writeJSON(path, ''), path).toThrow() + expect(() => fs.writeJSON(path, 'data'), path).toThrow() + expect(() => fs.delete(path), path).toThrow() + await expect(fs.ls(path), path).resolves.not.toThrow() + await expect(Promise.resolve(fs.logs(path)), path).resolves.not + .toThrow() + } else { + expect(() => fs.delete(path), path).not.toThrow() + } + await expect(fs.read(path), path).rejects.toThrow() + await expect(fs.readBinary(path), path).rejects.toThrow() + await expect(fs.readJSON(path), 
path).rejects.toThrow() + await expect(fs.exists(path), path).resolves.not.toThrow() + } + }) + + await t.step('write', async () => { + const path = 'hello.txt' + const data = 'world' + + await expect(fs.exists(path)).resolves.toBeFalsy() + fs.write(path, 'data') + await expect(fs.exists(path)).resolves.toBeTruthy() + await expect(fs.read(path)).resolves.toBe('data') + fs.delete(path) + await expect(fs.exists(path)).resolves.toBeFalsy() + await expect(fs.read(path)).rejects.toThrow('Could not find file or') + fs.write(path, data) + await expect(fs.exists(path)).resolves.toBeTruthy() + await expect(fs.read(path)).resolves.toBe(data) + + expect(await db.readHead(fs.pid)).toBe(fs.oid) + const { next } = await fs.writeCommitObject('write single') + expect(await db.readHead(fs.pid), 'commit changed head').toBe(fs.oid) + + const read = await next.read(path) + expect(read).toBe(data) + await expect(next.exists(path)).resolves.toBeTruthy() + + fs = next + }) + await t.step('write nested', async () => { + const path = 'nested/deep/hello.txt' + const data = 'world' + + await expect(fs.exists(path)).resolves.toBeFalsy() + fs.write(path, 'data') + await expect(fs.exists(path)).resolves.toBeTruthy() + await expect(fs.read(path)).resolves.toBe('data') + fs.delete(path) + await expect(fs.exists(path)).resolves.toBeFalsy() + await expect(fs.read(path)).rejects.toThrow('Could not find file or') + fs.write(path, data) + await expect(fs.exists(path)).resolves.toBeTruthy() + await expect(fs.read(path)).resolves.toBe(data) + + const { next } = await fs.writeCommitObject('write nested') + await expect(next.exists(path)).resolves.toBeTruthy() + const read = await next.read(path) + expect(read).toBe(data) + const oldRead = await next.read('hello.txt') + expect(oldRead).toBe(data) + + fs = next + }) + await t.step('logs', async () => { + const logs = await fs.logs() + expect(logs.length).toBe(3) + }) + await t.step('delete', async () => { + const path = 'hello.txt' + fs.delete(path) + 
await expect(fs.read(path)).rejects.toThrow('Could not find file or') + const { next } = await fs.writeCommitObject('delete') + await expect(next.exists(path)).resolves.toBeFalsy() + await expect(next.read(path)).rejects.toThrow('Could not find hello.txt') + + fs = next + }) + db.stop() +}) +Deno.test('clone and pull', async (t) => { + const db = await DB.create(DB.generateAesKey()) + let fs: FS + await t.step('clone HAL', async () => { + fs = await FS.clone('dreamcatcher-tech/HAL', db) + expect(fs.oid).toHaveLength(40) + }) + await t.step('read', async () => { + const path = 'README.md' + const data = await fs.read(path) + expect(data).toContain('AI') + }) + await t.step('fetch', async () => { + const oid = await FS.fetch('dreamcatcher-tech/HAL', fs.pid, db) + expect(oid).toEqual(fs.oid) + }) + + // insert the PAT into the github repo + // do a push, using this auth method + + // make a new branch + // push this up to the git repo for testing + + db.stop() +}) +Deno.test('merge', async (t) => { + const db = await DB.create(DB.generateAesKey()) + let fs = await FS.init(partialFromRepo('test/merge'), db) + + await t.step('merge', async () => { + fs.write('hello.txt', 'world') + const { next } = await fs.writeCommitObject('merge') + const oid = await next.merge(fs.oid) + expect(oid).not.toEqual(fs.oid) + fs = next + }) + await t.step('merge conflict', async () => { + const readme = await fs.read('hello.txt') + expect(readme).toBe('world') + + fs.write('hello.txt', 'conflict 1') + const { next } = await fs.writeCommitObject('merge') + + const pid = addPeer(fs.pid, 'conflict') + const branch = fs.branch(pid) + branch.write('hello.txt', 'conflict 2') + const { next: branchNext } = await branch.writeCommitObject('branch') + + const oid = await next.merge(branchNext.oid) + + const merged = FS.open(next.pid, oid, db) + const readmeMerged = await merged.read('hello.txt') + expect(readmeMerged).toContain('conflict 1') + expect(oid).not.toEqual(fs.oid) + }) + + db.stop() +}) 
+Deno.test('overwrite', async () => { + const db = await DB.create(DB.generateAesKey()) + const pid = partialFromRepo('test/merge') + const base = await FS.init(pid, db) + base.write(IO_PATH, '0') + base.write('a.txt', 'a') + base.write('b.txt', 'b') + base.write('c/c.txt', 'c') + base.write('c/d.txt', 'd') + const { next } = await base.writeCommitObject('initial') + const childPid = addBranches(next.pid, 'child') + const branch = next.branch(childPid) + + branch.write(IO_PATH, '1') + branch.write('a.txt', 'A') + branch.delete('c/c.txt') + branch.write('c/e.txt', 'e') + branch.write('c/d.txt', 'D') + const { next: branchNext } = await branch.writeCommitObject('branch') + + await next.overwrite(branchNext.oid, 'c/c.txt') + expect(await next.read(IO_PATH)).toBe('0') + expect(await branchNext.read(IO_PATH)).toBe('1') + + expect(await next.read('a.txt')).toBe('A') + expect(await next.read('b.txt')).toBe('b') + expect(await next.read('c/c.txt')).toBe('c') + expect(await branchNext.exists('c/c.txt')).toBeFalsy() + expect(await next.read('c/d.txt')).toBe('D') + expect(await next.read('c/e.txt')).toBe('e') + + db.stop() +}) +// TODO block isolates from accessing .git paths + +// check that a nonexistent pid throws +// TODO test writing a file that hashes to the same as the existing one +// this should result in no commit being made. IO always changes, so this +// should still commit, but there should be no other file changes attempted. 
diff --git a/_import-artifact/git/fs.ts b/_import-artifact/git/fs.ts new file mode 100644 index 000000000000..f15ace234f17 --- /dev/null +++ b/_import-artifact/git/fs.ts @@ -0,0 +1,717 @@ +import diff3Merge from 'diff3' +import http from '$git/http/web/index.js' +import { assert, Debug, equal, posix } from '@utils' +import { + Change, + ENTRY_BRANCH, + IO_PATH, + isBaseRepo, + PartialPID, + PID, + print, + randomness, + sha1, + type TreeEntry, +} from '@/constants.ts' +import git, { Errors, type MergeDriverCallback } from '$git' +import type DB from '@/db.ts' +import { GitKV } from './gitkv.ts' +const log = Debug('git:fs') +const dir = '/' + +type Upsert = { oid: string } | { data: string | Uint8Array } + +export default class FS { + readonly #pid: PID + readonly #oid: string + readonly #gitkv: GitKV + readonly #db: DB + readonly #upserts = new Map() + readonly #deletes = new Set() + /** If present, the commit oid that overwrote the current FS */ + #overwrite: string | undefined + + static #caches = new Map() + static #getGitCache(pid: PID) { + if (!FS.#caches.has(pid.repoId)) { + FS.#caches.set(pid.repoId, {}) + } + const cache = FS.#caches.get(pid.repoId) + assert(cache) + return cache + } + static clearCache(pid: PID) { + FS.#caches.delete(pid.repoId) + } + get #git() { + const cache = FS.#getGitCache(this.#pid) + return { fs: this.fs, dir, cache } + } + get pid() { + return this.#pid + } + /** The oid used as the root git directory */ + get #internalOid() { + return this.#overwrite || this.oid + } + /** The commit oid backing this filesystem */ + get oid() { + return this.#oid + } + get fs() { + return { promises: this.#gitkv } + } + get isChanged() { + return this.#overwrite || this.#upserts.size > 0 || this.#deletes.size > 0 + } + get upserts() { + return [...this.#upserts.keys()] + } + get deletes() { + return [...this.#deletes] + } + private constructor(pid: PID, oid: string, db: DB) { + assert(sha1.test(oid), 'Commit not SHA-1: ' + oid) + this.#pid = pid + 
this.#oid = oid + this.#db = db + this.#gitkv = GitKV.recreate(db, pid) + } + static open(pid: PID, commit: string, db: DB) { + return new FS(pid, commit, db) + } + static async openHead(pid: PID, db: DB) { + const head = await db.readHead(pid) + if (!head) { + throw new Error('HEAD not found: ' + print(pid)) + } + return new FS(pid, head, db) + } + static async init(partial: PartialPID, db: DB, owner?: PID) { + const repoId = generateFakeRepoId() + const pid = { ...partial, repoId } + const fs = { promises: GitKV.createBlank(db, pid) } + assert(isBaseRepo(pid), 'not a base repo: ' + print(pid)) + + await git.init({ fs, dir, defaultBranch: pid.branches[0] }) + log('init complete') + const author = { name: owner ? print(owner) : 'git/init' } + const cache = FS.#getGitCache(pid) + // TODO insert the .io.json file that includes the consensus config + // TODO write owner into config + const commit = await git.commit({ + noUpdateBranch: true, + fs, + dir, + message: 'initial commit', + author, + cache, + }) + await db.atomic().createBranch(pid, commit).commit() + const init = new FS(pid, commit, db) + return init + } + static async clone(repo: string, db: DB) { + // TODO lock the whole repo in case something is running + // TODO detect the main branch from remote/HEAD + assert(repo.split('/').length === 2, 'invalid repo: ' + repo) + const url = `https://github.com/${repo}.git` + + const repoId = generateFakeRepoId() + const [account, repository] = repo.split('/') + const pid = { repoId, account, repository, branches: [ENTRY_BRANCH] } + const fs = { promises: GitKV.createBlank(db, pid) } + + fs.promises.oneAtomicWrite = db.atomic() + const cache = FS.#getGitCache(pid) + await git.clone({ fs, dir, url, http, noCheckout: true, cache }) + const commit = await db.readHead(pid) + assert(commit, 'HEAD not found: ' + pid.branches.join('/')) + const clone = new FS(pid, commit, db) + return clone + } + static async fetch(repo: string, pid: PID, db: DB) { + 
assert(repo.split('/').length === 2, 'invalid repo: ' + repo) + const url = `https://github.com/${repo}.git` + + const lockId = await db.lockRepo(pid) + try { + const { fs } = await FS.openHead(pid, db) + const cache = FS.#getGitCache(pid) + const result = await git.fetch({ fs, http, dir, url, cache }) + const { fetchHead } = result + assert(fetchHead, 'fetchHead not found') + + // TODO make release be atomic + return fetchHead + } finally { + await db.releaseRepoLock(pid, lockId) + } + } + static async push(repo: string, pid: PID, db: DB) { + assert(repo.split('/').length === 2, 'invalid repo: ' + repo) + const url = `https://github.com/${repo}.git` + assert(isBaseRepo(pid), 'not a base repo: ' + print(pid)) + const { fs } = await FS.openHead(pid, db) + const cache = FS.#getGitCache(pid) + const result = await git.push({ fs, http, dir, url, cache }) + const { ok } = result + if (!ok) { + throw new Error('push failed') + } + } + + tick(commit: string) { + return new FS(this.#pid, commit, this.#db) + } + /** @param the new PID to branch into */ + branch(pid: PID) { + assert(isSameRepo(this.#pid, pid), 'branching into a different repo') + return new FS(pid, this.oid, this.#db) + } + logs(filepath?: string, depth?: number) { + if (filepath) { + filepath = refine(filepath) + } + if (filepath === '.') { + filepath = undefined + } + return git.log({ ...this.#git, filepath, depth, ref: this.oid }) + } + + async writeCommitObject(message = '', parents: string[] = []) { + assert(this.isChanged, 'empty commit') + assert(parents.every((oid) => sha1.test(oid)), 'Parent not SHA-1') + const { oid, changes } = await this.#flush() + this.#upserts.clear() + this.#deletes.clear() + + const author = { name: 'git/commit' } + const nextCommit = await git.commit({ + ...this.#git, + noUpdateBranch: true, + message, + author, + tree: oid, + parent: [this.oid, ...parents], + }) + const { commit } = await git.readCommit({ ...this.#git, oid: nextCommit }) + + const next = new FS(this.#pid, 
nextCommit, this.#db) + return { next, changes, commit } + } + async theirsMerge(theirs: string) { + const parents = [theirs] + assert(parents.every((oid) => sha1.test(oid)), 'Parent not SHA-1') + assert(!this.isChanged, 'cannot merge with changes') + if (this.oid === theirs) { + throw new Error('Already merged') + } + const author = { name: 'git/theirsMerge' } + + const theirTree = await git.readTree({ ...this.#git, oid: theirs }) + const ourTree = await git.readTree({ ...this.#git, oid: this.oid }) + + const io = ourTree.tree.find((entry) => entry.path === IO_PATH) + const merged = theirTree.tree.filter((entry) => entry.path !== IO_PATH) + if (io) { + merged.push(io) + } + const tree = await git.writeTree({ ...this.#git, tree: merged }) + + const commit = await git.commit({ + ...this.#git, + noUpdateBranch: true, + message: 'Merge using "theirs" strategy', + author, + tree, + parent: [this.oid, ...parents], + }) + + const next = new FS(this.#pid, commit, this.#db) + return { next, changes: {}, commit } + } + async merge(commit: string) { + assert(!this.isChanged, 'cannot merge with changes') + if (this.oid === commit) { + return this.oid + } + const result = await git.merge({ + ...this.#git, + noUpdateBranch: true, + mergeDriver, + ours: this.oid, + theirs: commit, + author: { name: 'Artifact' }, + message: 'Merge', + }) + assert(result.oid, 'merge failed') + return result.oid + } + async exists(path: string) { + path = refine(path) + if (path === '.') { + return true + } + if (this.#deletes.has(path)) { + return false + } + if (this.#upserts.has(path)) { + return true + } + + try { + await this.readOid(path) + return true + } catch (error) { + // TODO move all errors to be FileNotFoundError + if ( + error instanceof Error && 'code' in error && + error.code === 'NotFoundError' + ) { + return false + } + throw error + } + } + + async readOid(path: string): Promise { + // TODO test how this works for "." 
+ path = refine(path) + if (this.#deletes.has(path)) { + throw new Errors.NotFoundError(path) + } + + const dirname = posix.dirname(path) + const basename = posix.basename(path) + const tree = await this.readTree(dirname) + for (const entry of tree) { + if (entry.path === basename) { + return entry.oid + } + } + throw new Errors.NotFoundError(path) + } + async readTree(path: string = '.') { + path = refine(path) + const oid = this.#internalOid + const filepath = path === '.' ? undefined : path + try { + const { tree } = await git.readTree({ ...this.#git, oid, filepath }) + return tree + } catch (error) { + if ( + error instanceof Error && 'code' in error && + error.code === 'NotFoundError' + ) { + // remove the git commit hash from the error so it is repeatable + throw new Errors.NotFoundError(path) + } + throw error + } + } + delete(path: string) { + path = refine(path) + assert(path !== '.', 'cannot delete root') + // TODO delete a whole directory + log('delete', path) + this.#deletes.add(path) + this.#upserts.delete(path) + } + writeJSON(path: string, json: unknown) { + // TODO store json objects specially, only strinify on commit + // then broadcast changes as json object purely + assert(posix.extname(path) === '.json', `path must be *.json: ${path}`) + path = refine(path) + const string = JSON.stringify(json, null, 2) + assert(typeof string === 'string', 'stringify failed') + return this.write(path, string) + } + write(path: string, data: string | Uint8Array) { + path = refine(path) + assert(path !== '.', 'cannot write to root') + // TODO ensure cannot write to a directory + log('write', path, data) + this.#upserts.set(path, { data }) + this.#deletes.delete(path) + } + async readJSON(path: string, commit?: string): Promise { + assert(posix.extname(path) === '.json', `path must be *.json: ${path}`) + const data = await this.read(path, commit) + return JSON.parse(data) + } + async read(path: string, commit?: string) { + const blob = await this.readBinary(path, 
commit) + return new TextDecoder().decode(blob) + } + async readBinary(path: string, commit?: string): Promise { + path = refine(path) + log('readBinary', path, commit) + + if (commit) { + const { blob } = await this.#readBlob(path, commit) + return blob + } + + if (this.#deletes.has(path)) { + throw new Error('Could not find file or directory: ' + path) + } + if (this.#upserts.has(path)) { + const upsert = this.#upserts.get(path) + assert(upsert, 'upsert not found') + if ('data' in upsert) { + if (typeof upsert.data === 'string') { + return new TextEncoder().encode(upsert.data) + } + return upsert.data + } else { + const { oid } = upsert + const { blob } = await this.#safeReadBlob({ oid }) + return blob + } + } + const { blob } = await this.#readBlob(path) + return blob + } + async #readBlob(filepath: string, commit?: string) { + const { blob, oid } = await this.#safeReadBlob({ + oid: commit || this.#internalOid, + filepath, + }) + assert(blob instanceof Uint8Array, 'blob not Uint8Array: ' + typeof blob) + return { blob, oid } + } + async #safeReadBlob(params: { oid: string; filepath?: string }) { + try { + const { oid, filepath } = params + return await git.readBlob({ ...this.#git, oid, filepath }) + } catch (error) { + if ( + error instanceof Error && 'code' in error && + error.code === 'NotFoundError' + ) { + throw new Errors.NotFoundError(params.filepath || '.') + } + throw error + } + } + + async ls(path: string = '.') { + path = refine(path) + // TODO make a streaming version of this for very large dirs + // TODO handle changes in the directory like deletes and upserts + log('ls', path) + const filepath = path === '.' ? 
undefined : path + const tree = await this.readTree(filepath) + + tree.sort((a, b) => { + if (a.type === 'tree' && b.type === 'blob') { + return -1 + } + if (a.type === 'blob' && b.type === 'tree') { + return 1 + } + if (a.path.startsWith('.') && !b.path.startsWith('.')) { + return -1 + } + if (!a.path.startsWith('.') && b.path.startsWith('.')) { + return 1 + } + return a.path.localeCompare(b.path) + }) + return tree.map((entry) => { + if (entry.type === 'tree') { + return entry.path + '/' + } + assert(entry.type === 'blob', 'entry type not blob: ' + entry.type) + return entry.path + }) + } + async getCommit() { + const result = await git.readCommit({ ...this.#git, oid: this.oid }) + return result.commit + } + copyChanges(from: FS) { + assert(!this.isChanged, 'cannot copy changes to a changed FS') + assert(equal(this.#pid, from.#pid), 'changes are from different pids') + for (const path of from.#deletes) { + this.#deletes.add(path) + } + for (const [path, upsert] of from.#upserts) { + this.#upserts.set(path, upsert) + } + this.#overwrite = from.#overwrite + } + async isPidExists(pid: PID) { + // TODO maybe if this gets read, it gets stored in the accumulator + // store the root head of our repo in the git file structure, used to + // reference from + return !!await this.#db.readHead(pid) + } + async mv(from: string, to: string) { + await this.cp(from, to) + this.delete(from) + } + async cp(from: string, to: string) { + // TODO check using directories + from = refine(from) + to = refine(to) + assert(from !== to, 'source and destination are the same') + assert(await this.exists(from), 'source does not exist: ' + from) + assert(!await this.exists(to), 'destination already exists: ' + to) + const oid = await this.readOid(from) + this.#upserts.set(to, { oid }) + } + async overwrite(commit: string, ...ignores: string[]) { + // TODO allow changes so long as they are in the ignored set + assert(!this.isChanged, 'Uncommitted changes may be lost') + assert(sha1.test(commit), 
'Commit not SHA-1: ' + commit) + assert(this.oid !== commit, 'cannot overwrite with same commit') + assert(this.#overwrite !== commit, 'cannot overwrite the same commit twice') + ignores.push(IO_PATH) + ignores.forEach(refine) + + const result = await git.readCommit({ ...this.#git, oid: commit }) + assert(result, 'commit not found: ' + commit) + + for (const ignore of ignores) { + if (this.#upserts.has(ignore)) { + continue + } + if (this.#deletes.has(ignore)) { + continue + } + if (await this.exists(ignore)) { + const oid = await this.readOid(ignore) + // TODO check if it is unchanged that this is handled + this.#upserts.set(ignore, { oid }) + } else { + // TODO check that deleting something not present is handled + this.#deletes.add(ignore) + } + } + + this.#overwrite = commit + } + async #flush() { + const changes: { [key: string]: Change } = {} + const oid = this.#internalOid + const { tree: base } = await git.readTree({ ...this.#git, oid }) + log('flush tree', base) + const tree: Tree = { + oid, + tree: base, + upserts: new Map(), + deletes: new Set(), + children: new Map(), + } + for (const [path, upsert] of this.#upserts) { + let patch: string | undefined + let blob: Uint8Array + let oid: string + if ('data' in upsert) { + if (typeof upsert.data === 'string') { + patch = upsert.data + blob = new TextEncoder().encode(upsert.data) + } else { + blob = upsert.data + } + oid = await git.writeBlob({ ...this.#git, blob }) + } else { + oid = upsert.oid + } + // TODO parallelize + log('hash', oid) + const parent = ensurePath(tree, path) + const filename = path.split('/').pop() + assert(filename, 'filename not found: ' + path) + // TODO ignore if already exists + parent.upserts.set(filename, { + // https://isomorphic-git.org/docs/en/walk#walkerentry-mode + mode: '100644', + path: filename, + oid, + type: 'blob', + }) + changes[path] = { oid, patch } + } + + for (const path of this.#deletes) { + log('delete', path) + const parent = ensurePath(tree, path) + const 
filename = path.split('/').pop() + assert(filename, 'filename not found: ' + path) + parent.deletes.add(filename) + changes[path] = {} + // TODO should be able to wipe a whole dir with no effort here + } + + await retrieveAffectedTrees(tree, this.#git) + + await bubbleChanges(tree, this.#git) + + return { oid: tree.oid, changes } + } +} +type Tree = { + oid?: string + tree?: TreeObject + upserts: Map + deletes: Set + children: Map +} +type TreeObject = TreeEntry[] + +const ensurePath = (tree: Tree, path: string) => { + const parts = path.split('/') + parts.pop() + let parent = tree + for (let i = 0; i < parts.length; i++) { + const part = parts[i] + let child = parent.children.get(part) + if (!child) { + child = { + upserts: new Map(), + deletes: new Set(), + children: new Map(), + } + parent.children.set(part, child) + } + parent = child + } + return parent +} +type Opts = { fs: { promises: GitKV }; cache: object; dir: string } +const retrieveAffectedTrees = async (tree: Tree, opts: Opts) => { + const promises = [] + if (!tree.tree) { + assert(tree.oid, 'tree oid not found') + const result = await git.readTree({ ...opts, oid: tree.oid }) + tree.tree = result.tree + } + for (const entry of tree.tree) { + if (entry.type === 'tree') { + if (tree.children.has(entry.path)) { + const child = tree.children.get(entry.path) + assert(child, 'child not found: ' + entry.path) + child.oid = entry.oid + promises.push(retrieveAffectedTrees(child, opts)) + } + } + } + await Promise.all(promises) +} +const bubbleChanges = async (tree: Tree, opts: Opts) => { + const layers = treeToLayers(tree) + log('layers', layers) + while (layers.length) { + const layer = layers.pop() + for (const item of layer!) 
{ + let tree = item.tree || [] + tree = tree.filter((entry) => { + if (item.upserts.has(entry.path)) { + return false + } + if (item.deletes.has(entry.path)) { + return false + } + if (item.children.has(entry.path)) { + return false + } + return true + }) + for (const [, entry] of item.upserts) { + tree.push(entry) + } + for (const [path, child] of item.children) { + assert(child.oid, 'child oid not found: ' + path) + const entry: TreeEntry = { + mode: '040000', + path, + oid: child.oid, + type: 'tree', + } + tree.push(entry) + } + item.oid = await git.writeTree({ ...opts, tree }) + log('write tree', item.oid) + } + } +} + +const treeToLayers = (tree: Tree, layers: Tree[][] = [], level: number = 0) => { + if (!layers[level]) { + layers[level] = [] + } + layers[level].push(tree) + for (const child of tree.children.values()) { + treeToLayers(child, layers, level + 1) + } + return layers +} +const refine = (path: string) => { + path = path.trim() + while (path.startsWith('/')) { + path = path.slice(1) + } + while (path.endsWith('/')) { + path = path.slice(0, -1) + } + while (path.startsWith('./')) { + path = path.slice(2) + } + if (!path) { + path = '.' 
+ } + path = posix.normalize(path) + assert(path, `path must be relative: ${path}`) + assert(!posix.isAbsolute(path), `path must be relative: ${path}`) + assert(path !== '.git', '.git paths are forbidden: ' + path) + assert(!path.startsWith('.git/'), '.git paths are forbidden: ' + path) + assert(!path.endsWith('/'), 'path must not end with /: ' + path) + assert(!path.startsWith('..'), 'path must not start with ..: ' + path) + return path +} + +const generateFakeRepoId = () => { + // TODO make this genuine based on the genesis commit + return `rep_${randomness()}` +} + +const mergeDriver: MergeDriverCallback = ({ contents, path }) => { + const baseContent = contents[0] + const ourContent = contents[1] + const theirContent = contents[2] + + if (path === IO_PATH) { + return { cleanMerge: true, mergedText: ourContent } + } + + const LINEBREAKS = /^.*(\r?\n|$)/gm + const ours = ourContent.match(LINEBREAKS) + const base = baseContent.match(LINEBREAKS) + const theirs = theirContent.match(LINEBREAKS) + const result = diff3Merge(ours, base, theirs) + let mergedText = '' + for (const item of result) { + if (item.ok) { + mergedText += item.ok.join('') + } + if (item.conflict) { + mergedText += item.conflict.a.join('') + } + } + return { cleanMerge: true, mergedText } +} +const isSameRepo = (a: PID, b: PID) => { + if (a.repoId !== b.repoId) { + return false + } + if (a.account !== b.account) { + return false + } + if (a.repository !== b.repository) { + return false + } + return true +} diff --git a/_import-artifact/git/gitkv.ts b/_import-artifact/git/gitkv.ts new file mode 100644 index 000000000000..78b2b683fd55 --- /dev/null +++ b/_import-artifact/git/gitkv.ts @@ -0,0 +1,293 @@ +import { Debug } from '@utils' +import { getRepoBase, headKeyToPid } from '@/keys.ts' +import type DB from '@/db.ts' +import { assert, AssertionError, equal } from '@utils' +import { PID } from '@/constants.ts' +import { Atomic } from '@/atomic.ts' + +const log = Debug('git:KV') + +export class GitKV 
{ + #allowed = ['config', 'objects', 'refs'] + #dropWrites = ['HEAD'] + #db: DB + #pid: PID + #exists: Set | undefined + #cache = Cache.create() + + private constructor(db: DB, pid: PID, isBlank: boolean = false) { + this.#db = db + this.#pid = pid + if (isBlank) { + this.#exists = new Set() + } + } + static recreate(db: DB, pid: PID) { + return new GitKV(db, pid) + } + static createBlank(db: DB, pid: PID) { + const isBlankDuringInitAndClone = true + return new GitKV(db, pid, isBlankDuringInitAndClone) + } + + isIgnored(path: string) { + const sliced = path.slice('/.git/'.length) + return this.#dropWrites.includes(sliced) + } + async readFile(path: string, opts: EncodingOpts) { + log('readFile', path, opts) + if (!path && !opts) { + throw new Error('path and opts are required') + } + if (path === '/.git/index') { + throw new FileNotFoundError('file not found: ' + path) + } + if (path === '/.git/HEAD') { + let ref = `ref: refs/heads` + for (const branch of this.#pid.branches) { + ref += `/${branch}` + } + log('readFile HEAD ref', ref) + return ref + } + if (this.#exists && !this.#exists.has(path)) { + throw new FileNotFoundError('file not found: ' + path) + } + if (path.startsWith('/.git/refs/heads/')) { + // only allow reading heads from the current branch, else what doing ? 
+ const rest = path.slice('/.git/refs/heads/'.length) + const branches = rest.split('/') + log('readFile refs/heads:', branches) + assert(equal(branches, this.#pid.branches), 'branches do not match') + const head = await this.#db.readHead(this.#pid) + if (!head) { + throw new FileNotFoundError('file not found: ' + path) + } + return head + } + + if (opts && opts.encoding && opts.encoding !== 'utf8') { + throw new Error('only utf8 encoding is supported') + } + + const pathKey = this.#getAllowedPathKey(path) + let result: Uint8Array + if (await this.#cache.has(pathKey)) { + result = this.#cache.get(pathKey) + } else { + const dbResult = await this.#db.blobGet(pathKey) + + if (!dbResult.versionstamp) { + log('readFile not found', path, opts) + throw new FileNotFoundError('file not found: ' + path) + } + result = dbResult.value + await this.#cache.set(pathKey, result) + } + if (opts && opts.encoding === 'utf8') { + const string = new TextDecoder().decode(result) + log('readFile', path, opts, string) + return string + } + log('readFile', path, opts, typeof result) + return result + } + async writeFile( + path: string, + data: Uint8Array | string, + opts: EncodingOpts, + ) { + log('writeFile', path, data, opts) + if (opts && opts.encoding && opts.encoding !== 'utf8') { + throw new Error('only utf8 encoding is supported') + } + if (this.isIgnored(path)) { + log('writeFile ignored', path) + return + } + if (path === '/.git/index') { + throw new Error('will not write to index') + } + if (this.#exists) { + this.#exists.add(path) + } + const pathKey = this.#getAllowedPathKey(path) + + // TODO skip the remote HEAD writes too ? 
+ if (path.startsWith('/.git/refs/heads/')) { + // TODO use the head tool on this.#db to ensure consistency + assert(typeof data === 'string', 'data must be a string') + const pid = headKeyToPid(pathKey) + // TODO ensure have maintenance while this is being changed + assert(this.#oneAtomicWrite, 'no atomic write provided') + const atomic = this.#oneAtomicWrite + this.#oneAtomicWrite = undefined + await atomic.createBranch(pid, data.trim()).commit() + } else { + if (typeof data === 'string') { + data = new TextEncoder().encode(data) + } + const promise = this.#cache.set(pathKey, data) + await this.#db.blobSet(pathKey, data) + await promise + } + log('writeFile done:', pathKey) + } + #oneAtomicWrite: Atomic | undefined + set oneAtomicWrite(atomic: Atomic) { + this.#oneAtomicWrite = atomic + } + async unlink(path: string) { + log('unlink', path) + if (path === '/.git/shallow') { + return + } + return await Promise.reject(new Error('not implemented')) + } + async readdir(path: string, options?: object) { + log('readdir', path) + assert(!options, 'options not supported') + let pathKey = getRepoBase(this.#pid) + if (path !== '/.git') { + pathKey = this.#getAllowedPathKey(path) + } + const results = await this.#db.listImmediateChildren(pathKey) + log('readdir', path, results) + + return results + } + mkdir(path: string) { + log('mkdir', path) + return Promise.resolve() + } + async rmdir(path: string) { + log('rmdir', path) + return await Promise.reject(new Error('not implemented')) + } + async stat(path: string) { + log('stat', path) + // generate the key for the path + let pathKey + try { + pathKey = this.#getAllowedPathKey(path) + } catch (error) { + if (error instanceof AssertionError) { + throw new FileNotFoundError('file not found: ' + path) + } + throw error + } + if (this.#exists && !this.#exists.has(path)) { + throw new FileNotFoundError('file not found: ' + path) + } + log('stat pathKey', pathKey) + let exists = false + if (path.startsWith('/.git/refs/heads/')) { 
+ const pid = headKeyToPid(pathKey) + const head = await this.#db.readHead(pid) + exists = !!head + } else { + if (await this.#cache.has(pathKey)) { + exists = true + } else { + if (path.startsWith('/.git/objects/')) { + // wastes a round trip to the db otherwise + throw new FileNotFoundError('file not found: ' + path) + } + exists = await this.#db.blobExists(pathKey) + } + } + if (!exists) { + throw new FileNotFoundError('file not found: ' + path) + } + return {} + } + async lstat(path: string) { + log('lstat', path) + const message = 'not implemented: ' + path + return await Promise.reject(new FileNotFoundError(message)) + } + async readlink(path: string) { + log('readlink', path) + return await Promise.reject(new Error('not implemented')) + } + async symlink(target: string, path: string, type: string) { + log('symlink', target, path, type) + return await Promise.reject(new Error('not implemented')) + } + async chmod(path: string, mode: number) { + log('chmod', path, mode) + return await Promise.reject(new Error('not implemented')) + } + #getAllowedPathKey(path: string) { + assert(path.startsWith('/.git/'), 'path must start with /.git/') + const rest = path.slice('/.git/'.length) + assert(rest, 'path must not be bare') + const prefix = getRepoBase(this.#pid) + const pathKey = rest.split('/') + assert(this.#allowed.includes(pathKey[0]), 'path not allowed: ' + pathKey) + + return [...prefix, ...pathKey] + } +} + +type EncodingOpts = { + encoding?: 'utf8' +} + +export class FileNotFoundError extends Error { + code = 'ENOENT' + constructor(message: string) { + super(message) + this.name = 'FileNotFoundError' + } +} +class Cache { + static create() { + return new Cache() + } + static #local = new Map() + #big: globalThis.Cache | undefined + async #load() { + if ('caches' in globalThis && !this.#big) { + // TODO name the caches per repo so they can be deleted granularly + this.#big = await caches.open('hashbucket') + } + } + async has(key: Deno.KvKey) { + const url = 
toUrl(key) + if (Cache.#local.has(url)) { + return true + } + + await this.#load() + if (this.#big) { + const cached = await this.#big.match(url) + if (cached) { + const cloned = cached.clone() + const bytes = await cloned.bytes() + Cache.#local.set(url, bytes) + return true + } + } + return false + } + get(key: Deno.KvKey) { + const url = toUrl(key) + if (Cache.#local.has(url)) { + const result = Cache.#local.get(url) + assert(result, 'cache inconsistency') + return result + } + throw new Error('not found: ' + key.join('/')) + } + async set(key: Deno.KvKey, value: Uint8Array) { + await this.#load() + const url = toUrl(key) + Cache.#local.set(url, value) + if (this.#big) { + const request = new Request(url) + await this.#big.put(request, new Response(value)) + } + } +} +const toUrl = (pathKey: Deno.KvKey) => 'http://' + pathKey.join('/') diff --git a/_import-artifact/git/serial.test.ts b/_import-artifact/git/serial.test.ts new file mode 100644 index 000000000000..dc7681446715 --- /dev/null +++ b/_import-artifact/git/serial.test.ts @@ -0,0 +1,136 @@ +import { assert, expect, log } from '@utils' +import { solidify } from '@/git/solidify.ts' +import { + IoStruct, + MergeReply, + PartialPID, + PID, + Pierce, + Proctype, + UnsequencedRequest, +} from '@/constants.ts' +import FS from './fs.ts' +import DB from '@/db.ts' + +Deno.test('pierce serial', async (t) => { + const db = await DB.create(DB.generateAesKey()) + const partial: PartialPID = { + account: 'git', + repository: 'test', + branches: ['main'], + } + let fs = await FS.init(partial, db) + const target: PID = fs.pid + const mockRequest: UnsequencedRequest = { + target, + isolate: 'mock', + functionName: 'mock', + params: {}, + proctype: Proctype.enum.SERIAL, + } + // TODO move away from using pierces to test these functions + const pierceFactory = (ulid: string): Pierce => ({ + target, + ulid, + isolate: 'test-isolate', + functionName: 'test', + params: { request: mockRequest }, + proctype: Proctype.enum.SERIAL, 
+ }) + const reply: MergeReply = { + target, + sequence: 0, + outcome: { result: 'test-result' }, + source: target, + commit: '4b825dc642cb6eb9a060e54bf8d69288fbee4904', + } + await t.step('init', async () => { + const logs = await fs.logs() + expect(logs).toHaveLength(1) + expect(fs.pid).toEqual(target) + }) + const pierce = pierceFactory('pierce') + await t.step('pierce', async () => { + const { exe, oid } = await solidify(fs, [pierce]) + assert(exe) + expect(exe.request).not.toHaveProperty('ulid') + + fs = FS.open(fs.pid, oid, db) + const io = await fs.readJSON('.io.json') + log('io', io) + expect(io.sequence).toBe(1) + expect(io.requests[0]).toEqual(pierce) + }) + await t.step('pierce reply', async () => { + const { oid, poolables, exe } = await solidify(fs, [reply]) + expect(oid).not.toBe(fs.oid) + expect(exe).toBeUndefined() + expect(poolables).toHaveLength(0) + + fs = FS.open(fs.pid, oid, db) + const io = await fs.readJSON('.io.json') + log('io', io) + expect(io.sequence).toBe(1) + expect(io.replies[0]).toEqual(reply.outcome) + }) + await t.step('second action blanks io', async () => { + const { oid, exe } = await solidify(fs, [pierce]) + expect(oid).not.toBe(fs.oid) + expect(exe).toBeDefined() + + fs = FS.open(fs.pid, oid, db) + const io = await fs.readJSON('.io.json') + log('io', io) + expect(io.sequence).toBe(2) + expect(io.requests[0]).toBeUndefined() + expect(io.requests[1]).toEqual(pierce) + expect(io.replies[0]).toBeUndefined() + }) + await t.step('multiple requests', async () => { + const { oid, exe } = await solidify(fs, [ + pierceFactory('a'), + pierceFactory('b'), + ]) + expect(oid).not.toBe(fs.oid) + expect(exe).toBeUndefined() + + fs = FS.open(fs.pid, oid, db) + const io = await fs.readJSON('.io.json') + expect(io.sequence).toBe(4) + expect(Object.keys(io.requests).length).toBe(3) + expect(io.replies).toEqual({}) + }) + await t.step('multiple replies', async () => { + const pool = replies(1, 3) + const { oid } = await solidify(fs, pool) + 
expect(oid).not.toBe(fs.oid) + + fs = FS.open(fs.pid, oid, db) + const io = await fs.readJSON('.io.json') + expect(io.sequence).toBe(4) + expect(Object.keys(io.requests).length).toBe(3) + expect(Object.keys(io.replies).length).toEqual(3) + }) + db.stop() + // TODO cannot reply out of order + // TODO permissioning for inclusion in the pool +}) +const replies = (start: number, end: number) => { + const pool: MergeReply[] = [] + for (let i = start; i <= end; i++) { + const target = { + repoId: 't', + account: 'git', + repository: 'test', + branches: ['main'], + } + pool.push({ + target, + source: target, + commit: '4b825dc642cb6eb9a060e54bf8d69288fbee4904', + sequence: i, + outcome: { result: i }, + }) + } + return pool +} diff --git a/_import-artifact/git/solidify.ts b/_import-artifact/git/solidify.ts new file mode 100644 index 000000000000..491685d97dec --- /dev/null +++ b/_import-artifact/git/solidify.ts @@ -0,0 +1,177 @@ +import { assert, Debug, equal } from '@utils' +import FS from './fs.ts' +import { + isMergeReply, + isPierceRequest, + isRemoteRequest, + isReply, + MergeReply, + Pending, + PID, + Pierce, + Poolable, + Proctype, + RemoteRequest, + Request, + SolidReply, + SolidRequest, + Solids, +} from '@/constants.ts' +import IOChannel from '@io/io-channel.ts' + +const log = Debug('AI:git:solidify') + +/** + * Takes in an unordered collection of operations, and orders them in the + * .io.json file, then performs a commit. + * The allowed operations are: + * - execute: an action to be executed serially on this branch. May be + * external or from another branch. + * - branch: a new branch is being created with an origin action to be executed. + * - merge: an async result is returning, or an external action is being + * inserted into the branch. + * - reply: a result is being returned from a dispatch after serial execution. 
+ */ +export const solidify = async ( + fs: FS, + pool: (Poolable | Pierce)[], + reply?: SolidReply, + pending?: Pending, +) => { + if (!Array.isArray(pool)) { + pool = [pool] + } + assert( + pool.length > 0 || reply || fs.isChanged || pending?.requests.length, + 'no-op', + ) + checkPool(pool) + assert(!reply || !pending, 'cannot have both reply and pending') + const io = await IOChannel.load(fs) + + const branches: number[] = [] + const poolables: (MergeReply | RemoteRequest)[] = [] + const parents = [] + const deletes = [] + + if (pending) { + assert(pending.requests.length, 'cannot be pending without requests') + log('solidifyPool pending', pending) + const { commit, requests, sequence } = pending + const sequenced = io.addPending(sequence, commit, requests) + sequenced.forEach((r) => { + if (equal(r.target, fs.pid)) { + collectBranch(r, r.sequence, branches) + } else { + const remoteRequest = { ...r, commit: 'updated post commit' } + poolables.push(remoteRequest) + } + }) + } + let poolPlusReply: (Poolable | Pierce | SolidReply)[] = pool + if (reply) { + poolPlusReply = [reply, ...pool] + } + for (const poolable of poolPlusReply) { + if (isReply(poolable)) { + log('reply', poolable) + const request = io.reply(poolable) + + const { outcome } = poolable + if (!isPierceRequest(request)) { + if (!equal(request.source, fs.pid)) { + const target = request.source + const source = request.target + const sequence = request.sequence + const reply: MergeReply = { + target, + source, + sequence, + outcome, + commit: 'updated post commit', + } + poolables.push(reply) + } + } + if (isBranch(request, fs.pid)) { + assert(isMergeReply(poolable), 'branch requires merge reply') + log('branch reply', poolable.commit) + parents.push(poolable.commit) + if (request.proctype === Proctype.enum.BRANCH) { + const { sequence } = poolable + const branchPid = io.getBranchPid(sequence) + deletes.push({ pid: branchPid, commit: poolable.commit }) + } + } + } else { + if 
(isRemoteRequest(poolable)) { + parents.push(poolable.commit) + } + const sequence = io.addRequest(poolable) + collectBranch(poolable, sequence, branches) + } + } + + let exe: Solids['exe'] + if (io.isExecutionAvailable()) { + exe = io.setExecution() + } + + if (pool.length || pending || reply) { + io.save() + } + // TODO pass in all the db checks to go with this write + // TODO write blobs atomically + + // const duplicateCheck = new Set(parents) + // assert(duplicateCheck.size === parents.length, 'duplicate parents') + + const { next, changes, commit } = await fs.writeCommitObject('pool', parents) + log('head', commit) + for (const poolable of poolables) { + poolable.commit = next.oid + } + + const solids: Solids = { + oid: next.oid, + commit, + changes, + exe, + branches, + poolables, + deletes, + } + return solids +} + +const checkPool = (pool: (Poolable | Pierce)[]) => { + if (!pool.length) { + return + } + const { target } = pool[0] + for (const poolable of pool) { + if (!equal(poolable.target, target)) { + throw new Error('pool has mixed targets') + } + } + // TODO use a hash on poolables to determine uniqueness in the pool + return target + // TODO a request and a reply with the same id cannot be in the same pool +} +const isBranch = (request: SolidRequest | Pierce, thisPid: PID) => { + if (!isPierceRequest(request)) { + if (!equal(request.target, thisPid)) { + return false + } + } + return request.proctype === Proctype.enum.BRANCH || + request.proctype === Proctype.enum.DAEMON +} +const collectBranch = (req: Request, sequence: number, branches: number[]) => { + const { proctype } = req + if (proctype === Proctype.enum.BRANCH || proctype === Proctype.enum.DAEMON) { + branches.push(sequence) + } else { + assert(proctype === Proctype.enum.SERIAL, `invalid proctype: ${proctype}`) + } +} diff --git a/_import-artifact/guts/__snapshots__/guts-ai-isolates.ts.json b/_import-artifact/guts/__snapshots__/guts-ai-isolates.ts.json new file mode 100644 index 
000000000000..62e114617cb9 --- /dev/null +++ b/_import-artifact/guts/__snapshots__/guts-ai-isolates.ts.json @@ -0,0 +1,2012 @@ +{ + "isolates: files:ls": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. 
Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + "stats": { + "created": 1729382628, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 768, + "openAiProcessingMs": 562, + "usage": { + "completion_tokens": 38, + "prompt_tokens": 307, + "total_tokens": 345, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current 
directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. 
Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382630, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1519, + "openAiProcessingMs": 1315, + "usage": { + "completion_tokens": 73, + "prompt_tokens": 397, + "total_tokens": 470, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + 
} + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382632, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1487, + "openAiProcessingMs": 1289, + "usage": { + "completion_tokens": 76, + "prompt_tokens": 484, + "total_tokens": 560, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "role": "assistant", + 
"content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls /", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Q5pWioxFjQIwEuvZ86tihahC", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the root directory.\"],\"path\":\"/\",\"count\":false,\"all\":false}" + } + } + ] + }, + "stats": { + "created": 1729382634, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1041, + "openAiProcessingMs": 822, + "usage": { + "completion_tokens": 38, + "prompt_tokens": 574, + "total_tokens": 612, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + 
"role": "assistant", + "content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls /", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Q5pWioxFjQIwEuvZ86tihahC", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the root directory.\"],\"path\":\"/\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_Q5pWioxFjQIwEuvZ86tihahC" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here are the files and directories in the root directory (\"/\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382636, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1715, + "openAiProcessingMs": 1496, + "usage": { + "completion_tokens": 76, + "prompt_tokens": 664, + "total_tokens": 740, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "role": "assistant", + 
"content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls /", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Q5pWioxFjQIwEuvZ86tihahC", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the root directory.\"],\"path\":\"/\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_Q5pWioxFjQIwEuvZ86tihahC" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the root directory (\"/\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls --all", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + 
"name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_vaQVVqRGxGLkaYiIffX6Lcfu", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list all files including hidden files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":true}" + } + } + ] + }, + "stats": { + "created": 1729382639, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1291, + "openAiProcessingMs": 1066, + "usage": { + "completion_tokens": 41, + "prompt_tokens": 755, + "total_tokens": 796, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": 
"call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls /", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Q5pWioxFjQIwEuvZ86tihahC", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the root directory.\"],\"path\":\"/\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_Q5pWioxFjQIwEuvZ86tihahC" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the root directory (\"/\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls --all", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": 
null, + "tool_calls": [ + { + "id": "call_vaQVVqRGxGLkaYiIffX6Lcfu", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list all files including hidden files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":true}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\".gitattributes\",\".gitignore\",\".io.json\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_vaQVVqRGxGLkaYiIffX6Lcfu" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. 
Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here are all the files and directories in the current directory (including hidden files):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - .gitattributes\n - .gitignore\n - .io.json\n - LICENSE\n - README.md", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382641, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1765, + "openAiProcessingMs": 1567, + "usage": { + "completion_tokens": 97, + "prompt_tokens": 858, + "total_tokens": 955, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lHiQmnnw17B1t9V7bgKw6qnS", + "type": "function", + "function": { + "name": "files_ls", + 
"arguments": "{\"reasoning\":[\"The user requested to list the files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_lHiQmnnw17B1t9V7bgKw6qnS" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls .", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the current directory (\".\"):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls /", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Q5pWioxFjQIwEuvZ86tihahC", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list the files in the root directory.\"],\"path\":\"/\",\"count\":false,\"all\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_Q5pWioxFjQIwEuvZ86tihahC" + }, + { + "role": "assistant", + "content": "Here are the files and directories in the root directory (\"/\"):\n\n- **Directories:**\n - 
agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls --all", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_vaQVVqRGxGLkaYiIffX6Lcfu", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user requested to list all files including hidden files in the current directory.\"],\"path\":\".\",\"count\":false,\"all\":true}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\".gitattributes\",\".gitignore\",\".io.json\",\"LICENSE\",\"README.md\",\"tmp\"]", + "tool_call_id": "call_vaQVVqRGxGLkaYiIffX6Lcfu" + }, + { + "role": "assistant", + "content": "Here are all the files and directories in the current directory (including hidden files):\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n - tmp/\n\n- **Files:**\n - .gitattributes\n - .gitignore\n - .io.json\n - LICENSE\n - README.md", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls ./nonexistent", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "The directory `./nonexistent` does not exist. 
Please check the path and try again.", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382644, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1067, + "openAiProcessingMs": 866, + "usage": { + "completion_tokens": 20, + "prompt_tokens": 971, + "total_tokens": 991, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "isolates: files:write": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "write \"c\" to the file test.txt", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCX1hzuFugw4yCCyW5yxu334", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"Writing the character 'c' to the file test.txt.\"],\"path\":\"test.txt\",\"content\":\"c\"}" + } + } + ] + }, + "stats": { + "created": 1729382646, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1088, + "openAiProcessingMs": 866, + "usage": { + "completion_tokens": 35, + "prompt_tokens": 315, + "total_tokens": 350, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "write \"c\" to the file test.txt", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCX1hzuFugw4yCCyW5yxu334", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"Writing the character 'c' to the file test.txt.\"],\"path\":\"test.txt\",\"content\":\"c\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":1}", + "tool_call_id": "call_SCX1hzuFugw4yCCyW5yxu334" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", 
+ "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "The character \"c\" has been successfully written to the file `test.txt`.", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382648, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1068, + "openAiProcessingMs": 856, + "usage": { + "completion_tokens": 17, + "prompt_tokens": 368, + "total_tokens": 385, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "write \"c\" to the file test.txt", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCX1hzuFugw4yCCyW5yxu334", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"Writing the character 'c' to the file test.txt.\"],\"path\":\"test.txt\",\"content\":\"c\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":1}", + "tool_call_id": "call_SCX1hzuFugw4yCCyW5yxu334" + }, + { + "role": "assistant", + "content": "The character \"c\" has been successfully written to the file `test.txt`.", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + 
"tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_uXPTWZH9tmIXQ0U7yUuAjafu", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"Listing the files in the current directory to see the contents.\"],\"path\":\"\"}" + } + } + ] + }, + "stats": { + "created": 1729382649, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 984, + "openAiProcessingMs": 779, + "usage": { + "completion_tokens": 30, + "prompt_tokens": 398, + "total_tokens": 428, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "write \"c\" to the file test.txt", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCX1hzuFugw4yCCyW5yxu334", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"Writing the character 'c' to the file test.txt.\"],\"path\":\"test.txt\",\"content\":\"c\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":1}", + "tool_call_id": "call_SCX1hzuFugw4yCCyW5yxu334" + }, + { + "role": "assistant", + "content": "The character \"c\" has been successfully written to the file `test.txt`.", + "name": "agents_files_md", + 
"refusal": null + }, + { + "content": "ls", + "role": "user", + "name": "test" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_uXPTWZH9tmIXQ0U7yUuAjafu", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"Listing the files in the current directory to see the contents.\"],\"path\":\"\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\",\"test.txt\"]", + "tool_call_id": "call_uXPTWZH9tmIXQ0U7yUuAjafu" + }, + { + "content": "", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "The files in the current directory are as follows:\n\n- agents/\n- curtains/\n- Definitiions/\n- images/\n- info/\n- stucks/\n- templates/\n- tests/\n- threads/\n- LICENSE\n- README.md\n- test.txt\n\nThe `test.txt` file is present in this directory.", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729382651, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_e2bde53e6e", + "duration": 1623, + "openAiProcessingMs": 1417, + "usage": { + "completion_tokens": 64, + "prompt_tokens": 481, + "total_tokens": 545, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/guts/__snapshots__/guts-focus.ts.json b/_import-artifact/guts/__snapshots__/guts-focus.ts.json new file mode 100644 index 000000000000..3181edcac6d1 --- /dev/null +++ b/_import-artifact/guts/__snapshots__/guts-focus.ts.json @@ -0,0 +1,8144 @@ +{ + "focus: thread management": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. 
The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. 
NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Hello! 
How can I assist you today?", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729382549, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 949, + "openAiProcessingMs": 455, + "usage": { + "completion_tokens": 10, + "prompt_tokens": 2466, + "total_tokens": 2476, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today?", + "name": "agents_reasoner_md", + "refusal": null + }, + { + "content": "start a new thread", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_j7FWvhjYdlNxQJri6IpxEfu2", + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "arguments": "{}" + } + } + ] + }, + "stats": { + "created": 1729382551, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1051, + "openAiProcessingMs": 806, + "usage": { + "completion_tokens": 13, + "prompt_tokens": 2506, + "total_tokens": 2519, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_T20": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "list the files I have", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lFJO7iVhCCDXMtLtv6sqcaZG", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user asked to list the files they have, which suggests that they want to see the directory contents.\",\"The files_ls tool is capable of listing all the files within a specified directory.\",\"Since no specific directory was mentioned, we can assume they want the list of files in the current directory. Using the default path for files_ls, which is \\\".\\\", should provide the necessary information.\"],\"path\":\".\"}" + } + } + ] + }, + "stats": { + "created": 1729382554, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 2002, + "openAiProcessingMs": 1788, + "usage": { + "completion_tokens": 95, + "prompt_tokens": 2470, + "total_tokens": 2565, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "list the files I have", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_lFJO7iVhCCDXMtLtv6sqcaZG", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user asked to list the files they have, which suggests that they want to see the directory contents.\",\"The 
files_ls tool is capable of listing all the files within a specified directory.\",\"Since no specific directory was mentioned, we can assume they want the list of files in the current directory. Using the default path for files_ls, which is \\\".\\\", should provide the necessary information.\"],\"path\":\".\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\"]", + "tool_call_id": "call_lFJO7iVhCCDXMtLtv6sqcaZG" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. 
Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. 
For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", 
+ "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. 
It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here's a list of the files and directories you have:\n\n- `agents/`\n- `curtains/`\n- `Definitiions/`\n- `images/`\n- `info/`\n- `stucks/`\n- `templates/`\n- `tests/`\n- `threads/`\n- `LICENSE`\n- `README.md`\n\nThese are the directories and files located in the current directory. Let me know if you need further details or actions performed on any of these files or directories!", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729382556, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1930, + "openAiProcessingMs": 1708, + "usage": { + "completion_tokens": 102, + "prompt_tokens": 2616, + "total_tokens": 2718, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + } + ] + } + }, + "focus: update from github": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Update the HAL repo to the latest version by using the system agent as an administrative action", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. 
The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. 
NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_k1acDvQZkjImD4SFsISJAyBl", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"I need to check the current directory structure to see if there is a directory for the HAL repo.\",\"Listing the files will help identify where the HAL repository is located.\",\"This will guide the next steps for fetching the latest updates for the repo.\"],\"path\":\".\"}" + } + } + ] + }, + "stats": { + "created": 1729382559, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1479, + "openAiProcessingMs": 959, + "usage": { + "completion_tokens": 66, + "prompt_tokens": 2482, + "total_tokens": 2548, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Update the HAL repo to the latest version by using the system agent as an administrative action", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_k1acDvQZkjImD4SFsISJAyBl", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"I need to check the current directory structure to see if there is a directory for the HAL repo.\",\"Listing the files will help identify where the HAL repository is located.\",\"This will 
guide the next steps for fetching the latest updates for the repo.\"],\"path\":\".\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\"]", + "tool_call_id": "call_k1acDvQZkjImD4SFsISJAyBl" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. 
Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. 
Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. 
This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_BGfBk775ebJY0zXMQGDBJ2jd", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"To manage the HAL repository, I need to browse into its directory.\",\"The next step is to list files within the 'agents' directory where it is likely located.\",\"This will help confirm the presence and the current state of the HAL repository.\"],\"path\":\"agents\"}" + } + } + ] + }, + "stats": { + "created": 1729382561, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1423, + "openAiProcessingMs": 930, + "usage": { + "completion_tokens": 66, + "prompt_tokens": 2599, + "total_tokens": 2665, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Update the HAL repo to the latest version by using the system agent as an administrative action", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_k1acDvQZkjImD4SFsISJAyBl", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"I need to check the current directory structure to see if there is a directory for the HAL repo.\",\"Listing the files will help identify where the HAL repository is located.\",\"This will guide the 
next steps for fetching the latest updates for the repo.\"],\"path\":\".\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\"]", + "tool_call_id": "call_k1acDvQZkjImD4SFsISJAyBl" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_BGfBk775ebJY0zXMQGDBJ2jd", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"To manage the HAL repository, I need to browse into its directory.\",\"The next step is to list files within the 'agents' directory where it is likely located.\",\"This will help confirm the presence and the current state of the HAL repository.\"],\"path\":\"agents\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agent-fixture.md\",\"cheeky-bstrd.md\",\"create-agent.md\",\"creatorBot.md\",\"dave-prompt-extractor.md\",\"dreamcatcher.md\",\"dumb-bot.md\",\"example-drone.md\",\"files.md\",\"gpt-4o-mini.md\",\"gpt-4o.md\",\"hal2.md\",\"hal3.md\",\"hamr.md\",\"imogen.md\",\"login-github.md\",\"meetings.md\",\"merger.md\",\"o1-mini.md\",\"o1.md\",\"reasoner.md\",\"remappings.md\",\"router.md\",\"stuck-loop.md\",\"switchboard.md\",\"system.md\",\"test_gen.md\",\"test-assessor.md\",\"test-file-runner.md\",\"test-requester.md\",\"test-results-summary.md\",\"topic.md\"]", + "tool_call_id": "call_BGfBk775ebJY0zXMQGDBJ2jd" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. 
The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. 
NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "gpt-4o", + "arguments": "{\"reasoning\":[\"The user wants to update the HAL repository to the latest version using the system agent.\",\"I am not sure which version of the HAL repository we are supposed to update or how the update process should be undertaken.\",\"Given there are files named 'hal2.md' and 'hal3.md' in the agents directory, I need gpt-4o's insight on which version we should focus on and the best approach to update the repository.\",\"This is to avoid making wrong assumptions and to execute the update process correctly.\"],\"content\":\"How should I update the HOL repository to the latest version when there are multiple \\\"hal\\\" files such as hal2.md and hal3.md present in the agents directory? 
Which file or version should I consider for updating, and what steps should I follow for a successful update using the system agent?\"}" + } + } + ] + }, + "stats": { + "created": 1729382563, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 2892, + "openAiProcessingMs": 2652, + "usage": { + "completion_tokens": 181, + "prompt_tokens": 2831, + "total_tokens": 3012, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1920 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Update the HAL repo to the latest version by using the system agent as an administrative action", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_k1acDvQZkjImD4SFsISJAyBl", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"I need to check the current directory structure to see if there is a directory for the HAL repo.\",\"Listing the files will help identify where the HAL repository is located.\",\"This will guide the next steps for fetching the latest updates for the repo.\"],\"path\":\".\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\"]", + "tool_call_id": "call_k1acDvQZkjImD4SFsISJAyBl" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_BGfBk775ebJY0zXMQGDBJ2jd", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"To manage the HAL repository, I need to browse into its directory.\",\"The next step is to list files within the 'agents' directory 
where it is likely located.\",\"This will help confirm the presence and the current state of the HAL repository.\"],\"path\":\"agents\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agent-fixture.md\",\"cheeky-bstrd.md\",\"create-agent.md\",\"creatorBot.md\",\"dave-prompt-extractor.md\",\"dreamcatcher.md\",\"dumb-bot.md\",\"example-drone.md\",\"files.md\",\"gpt-4o-mini.md\",\"gpt-4o.md\",\"hal2.md\",\"hal3.md\",\"hamr.md\",\"imogen.md\",\"login-github.md\",\"meetings.md\",\"merger.md\",\"o1-mini.md\",\"o1.md\",\"reasoner.md\",\"remappings.md\",\"router.md\",\"stuck-loop.md\",\"switchboard.md\",\"system.md\",\"test_gen.md\",\"test-assessor.md\",\"test-file-runner.md\",\"test-requester.md\",\"test-results-summary.md\",\"topic.md\"]", + "tool_call_id": "call_BGfBk775ebJY0zXMQGDBJ2jd" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "gpt-4o", + "arguments": "{\"reasoning\":[\"The user wants to update the HAL repository to the latest version using the system agent.\",\"I am not sure which version of the HAL repository we are supposed to update or how the update process should be undertaken.\",\"Given there are files named 'hal2.md' and 'hal3.md' in the agents directory, I need gpt-4o's insight on which version we should focus on and the best approach to update the repository.\",\"This is to avoid making wrong assumptions and to execute the update process correctly.\"],\"content\":\"How should I update the HOL repository to the latest version when there are multiple \\\"hal\\\" files such as hal2.md and hal3.md present in the agents directory? 
Which file or version should I consider for updating, and what steps should I follow for a successful update using the system agent?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "To update the HOL (Higher Order Logic) repository to the latest version when there are multiple \"hal\" files, such as `hal2.md` and `hal3.md`, you need to consider the following steps:\n\n1. **Understand the Purpose of HAL Files**: \n - HAL files typically serve as documentation or configuration that dictate the behavior of the agents. It's crucial to understand what each file (like `hal2.md` and `hal3.md`) represents. They might be different versions or configurations.\n - Check if there are version logs, comments, or documentation within the repository or files themselves that explain the differences.\n\n2. **Determine the Latest or Appropriate Version**:\n - Check repository documentation, commit messages, or version logs to determine which HAL file is the most recent or appropriate based on your specific requirements.\n - If there is a versioning system in place, the latest file may be indicated by a higher number or a specific tag.\n\n3. **Backup Current State**:\n - It's important to backup the current state of your repository and configuration before making any changes. This includes all HAL files and any configuration or settings related to the system agent.\n\n4. **Update the Repository**:\n - Pull the latest version of the repository using a version control command like `git pull` if you're using Git.\n - Ensure that your local environment is synchronized with the remote repository.\n\n5. **Select the HAL File for the Update**:\n - Once you've determined the appropriate file (for example, `hal3.md` if it's the latest), ensure that it is correctly placed or configured to be used by the system agent.\n - You may need to update configuration settings in the agent to point to the correct HAL file or merge necessary parts from multiple files.\n\n6. 
**Test the Configuration**:\n - After making changes, thoroughly test to ensure that the system agent works as expected with the selected HAL file. This might involve running predefined test cases or using a test environment.\n - Look out for errors or issues that might arise due to differences in configurations between the HAL files.\n\n7. **Documentation and Rollout**:\n - Update the documentation to reflect the changes made and the rationale behind choosing the specific HAL file.\n - If the update is successful, and you’re satisfied with the changes, proceed with rolling out the update to production environments, ensuring that any relevant team members are informed of the changes.\n\nBy following these steps, you should be able to successfully update your HOL repository with the appropriate configuration while minimizing errors and ensuring a smooth transition.", + "tool_call_id": "call_TrqXpjurgn77TXzAtjLUw3lF" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "I gathered insights into the HAL files present in the agents directory. To proceed with updating the HOL repository using the system agent, you will need to consider which version of the \"hal\" files (e.g., `hal2.md` or `hal3.md`) should be prioritized for the update. \n\nThe reasoning process suggests the following steps:\n1. **Identify the Purpose:** Understand what each HAL file (such as `hal2` and `hal3`) represents—whether they are different versions or configurations.\n2. 
**Determine the Appropriate Version:** Check the repository's documentation or logs for information on which HAL file is the latest or most appropriate.\n3. **Backup Current State:** Safeguard the current state of the repository and configuration.\n4. **Update Your Environment:** Synchronize your local environment with the remote repository.\n5. **Choose the Right HAL File:** Ensure the system agent uses the correct HAL file and adjust any configurations necessary.\n6. **Test:** Perform thorough testing to ensure the system agent works as expected with the chosen HAL file.\n7. **Document and Rollout:** Update documentation, ensure team members are informed, and implement the update if successful.\n\nIf you need to proceed with any specific tasks such as accessing or updating files, or if there are additional instructions based on the agent-specific methodology for updating, please let me know!", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729382573, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 4131, + "openAiProcessingMs": 3890, + "usage": { + "completion_tokens": 274, + "prompt_tokens": 3549, + "total_tokens": 3823, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2176 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q/napp-gpt-4o-10": [ + { + "request": { + "model": "gpt-4o", + "messages": [ + { + "content": "How should I update the HOL repository to the latest version when there are multiple \"hal\" files such as hal2.md and hal3.md present in the agents directory? 
Which file or version should I consider for updating, and what steps should I follow for a successful update using the system agent?", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "Your name is gpt-4o, or 4o for short.", + "role": "system", + "name": "agents_gpt-4o_md" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "To update the HOL (Higher Order Logic) repository to the latest version when there are multiple \"hal\" files, such as `hal2.md` and `hal3.md`, you need to consider the following steps:\n\n1. **Understand the Purpose of HAL Files**: \n - HAL files typically serve as documentation or configuration that dictate the behavior of the agents. It's crucial to understand what each file (like `hal2.md` and `hal3.md`) represents. They might be different versions or configurations.\n - Check if there are version logs, comments, or documentation within the repository or files themselves that explain the differences.\n\n2. **Determine the Latest or Appropriate Version**:\n - Check repository documentation, commit messages, or version logs to determine which HAL file is the most recent or appropriate based on your specific requirements.\n - If there is a versioning system in place, the latest file may be indicated by a higher number or a specific tag.\n\n3. **Backup Current State**:\n - It's important to backup the current state of your repository and configuration before making any changes. This includes all HAL files and any configuration or settings related to the system agent.\n\n4. **Update the Repository**:\n - Pull the latest version of the repository using a version control command like `git pull` if you're using Git.\n - Ensure that your local environment is synchronized with the remote repository.\n\n5. 
**Select the HAL File for the Update**:\n - Once you've determined the appropriate file (for example, `hal3.md` if it's the latest), ensure that it is correctly placed or configured to be used by the system agent.\n - You may need to update configuration settings in the agent to point to the correct HAL file or merge necessary parts from multiple files.\n\n6. **Test the Configuration**:\n - After making changes, thoroughly test to ensure that the system agent works as expected with the selected HAL file. This might involve running predefined test cases or using a test environment.\n - Look out for errors or issues that might arise due to differences in configurations between the HAL files.\n\n7. **Documentation and Rollout**:\n - Update the documentation to reflect the changes made and the rationale behind choosing the specific HAL file.\n - If the update is successful, and you’re satisfied with the changes, proceed with rolling out the update to production environments, ensuring that any relevant team members are informed of the changes.\n\nBy following these steps, you should be able to successfully update your HOL repository with the appropriate configuration while minimizing errors and ensuring a smooth transition.", + "name": "agents/gpt-4o.md", + "refusal": null + }, + "stats": { + "created": 1729382567, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_a7d06e42a7", + "duration": 5033, + "openAiProcessingMs": 4832, + "usage": { + "completion_tokens": 521, + "prompt_tokens": 108, + "total_tokens": 629, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "focus: infinite loop regression": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Write a file with the following text \"I love to be in Paris in 
the Spring\". Then save it as paris.txt. Then replace all text in that file where \"Paris\" occurs with \"Edinburgh\". Then rename the file Edinburgh.txt", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. 
Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. 
Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. 
This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_UkeTB2ekFfwVXeSR8BR3s46c", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"I need to write a file with the specified content.\",\"The desired content to write in the file is 'I love to be in Paris in the Spring'.\",\"The file is to be named 'paris.txt'.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Paris in the Spring\"}" + } + } + ] + }, + "stats": { + "created": 1729382578, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1863, + "openAiProcessingMs": 1345, + "usage": { + "completion_tokens": 76, + "prompt_tokens": 2513, + "total_tokens": 2589, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Write a file with the following text \"I love to be in Paris in the Spring\". Then save it as paris.txt. Then replace all text in that file where \"Paris\" occurs with \"Edinburgh\". 
Then rename the file Edinburgh.txt", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_UkeTB2ekFfwVXeSR8BR3s46c", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"I need to write a file with the specified content.\",\"The desired content to write in the file is 'I love to be in Paris in the Spring'.\",\"The file is to be named 'paris.txt'.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Paris in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":35}", + "tool_call_id": "call_UkeTB2ekFfwVXeSR8BR3s46c" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. 
If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. 
For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", 
+ "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. 
It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Hkt2sxaKZwp3GSt231P2qlF7", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the file 'paris.txt' to find and replace all instances of 'Paris' with 'Edinburgh'.\",\"After reading the content, I'll modify it accordingly.\"],\"path\":\"paris.txt\"}" + } + } + ] + }, + "stats": { + "created": 1729382581, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1549, + "openAiProcessingMs": 1061, + "usage": { + "completion_tokens": 60, + "prompt_tokens": 2608, + "total_tokens": 2668, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Write a file with the following text \"I love to be in Paris in the Spring\". Then save it as paris.txt. Then replace all text in that file where \"Paris\" occurs with \"Edinburgh\". 
Then rename the file Edinburgh.txt", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_UkeTB2ekFfwVXeSR8BR3s46c", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"I need to write a file with the specified content.\",\"The desired content to write in the file is 'I love to be in Paris in the Spring'.\",\"The file is to be named 'paris.txt'.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Paris in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":35}", + "tool_call_id": "call_UkeTB2ekFfwVXeSR8BR3s46c" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Hkt2sxaKZwp3GSt231P2qlF7", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the file 'paris.txt' to find and replace all instances of 'Paris' with 'Edinburgh'.\",\"After reading the content, I'll modify it accordingly.\"],\"path\":\"paris.txt\"}" + } + } + ] + }, + { + "role": "tool", + "content": "I love to be in Paris in the Spring", + "tool_call_id": "call_Hkt2sxaKZwp3GSt231P2qlF7" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_cRcnTMu4Y0GMv2FtlASGaa5H", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"After reading the content of 'paris.txt', I will replace 'Paris' with 'Edinburgh' in the text.\",\"The modified text will then be written back to the file, replacing the old content.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Edinburgh in the Spring\"}" + } + } + ] + }, + "stats": { + "created": 1729382583, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1675, + "openAiProcessingMs": 1462, + "usage": { + "completion_tokens": 74, + "prompt_tokens": 2690, + "total_tokens": 2764, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1920 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Write a file with the following text \"I love to be in Paris in the Spring\". Then save it as paris.txt. Then replace all text in that file where \"Paris\" occurs with \"Edinburgh\". 
Then rename the file Edinburgh.txt", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_UkeTB2ekFfwVXeSR8BR3s46c", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"I need to write a file with the specified content.\",\"The desired content to write in the file is 'I love to be in Paris in the Spring'.\",\"The file is to be named 'paris.txt'.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Paris in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":35}", + "tool_call_id": "call_UkeTB2ekFfwVXeSR8BR3s46c" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Hkt2sxaKZwp3GSt231P2qlF7", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the file 'paris.txt' to find and replace all instances of 'Paris' with 'Edinburgh'.\",\"After reading the content, I'll modify it accordingly.\"],\"path\":\"paris.txt\"}" + } + } + ] + }, + { + "role": "tool", + "content": "I love to be in Paris in the Spring", + "tool_call_id": "call_Hkt2sxaKZwp3GSt231P2qlF7" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_cRcnTMu4Y0GMv2FtlASGaa5H", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"After reading the content of 'paris.txt', I will replace 'Paris' with 'Edinburgh' in the text.\",\"The modified text will then be written back to the file, replacing the old content.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Edinburgh in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":39}", + "tool_call_id": 
"call_cRcnTMu4Y0GMv2FtlASGaa5H" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. 
NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_pklki7uYxadD9DXy0ASNloJN", + "type": "function", + "function": { + "name": "files_mv", + "arguments": "{\"reasoning\":[\"Having replaced 'Paris' with 'Edinburgh' in 'paris.txt', the final step is to rename the file to 'Edinburgh.txt'.\",\"This will complete the task as described by the user.\"],\"from\":\"paris.txt\",\"to\":\"Edinburgh.txt\"}" + } + } + ] + }, + "stats": { + "created": 1729382586, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1515, + "openAiProcessingMs": 1061, + "usage": { + "completion_tokens": 68, + "prompt_tokens": 2783, + "total_tokens": 2851, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2048 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "Write a file with the following text \"I love to be in Paris in the Spring\". Then save it as paris.txt. Then replace all text in that file where \"Paris\" occurs with \"Edinburgh\". 
Then rename the file Edinburgh.txt", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_UkeTB2ekFfwVXeSR8BR3s46c", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"I need to write a file with the specified content.\",\"The desired content to write in the file is 'I love to be in Paris in the Spring'.\",\"The file is to be named 'paris.txt'.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Paris in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":35}", + "tool_call_id": "call_UkeTB2ekFfwVXeSR8BR3s46c" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_Hkt2sxaKZwp3GSt231P2qlF7", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the file 'paris.txt' to find and replace all instances of 'Paris' with 'Edinburgh'.\",\"After reading the content, I'll modify it accordingly.\"],\"path\":\"paris.txt\"}" + } + } + ] + }, + { + "role": "tool", + "content": "I love to be in Paris in the Spring", + "tool_call_id": "call_Hkt2sxaKZwp3GSt231P2qlF7" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_cRcnTMu4Y0GMv2FtlASGaa5H", + "type": "function", + "function": { + "name": "files_write", + "arguments": "{\"reasoning\":[\"After reading the content of 'paris.txt', I will replace 'Paris' with 'Edinburgh' in the text.\",\"The modified text will then be written back to the file, replacing the old content.\"],\"path\":\"paris.txt\",\"content\":\"I love to be in Edinburgh in the Spring\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"charactersWritten\":39}", + "tool_call_id": 
"call_cRcnTMu4Y0GMv2FtlASGaa5H" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_pklki7uYxadD9DXy0ASNloJN", + "type": "function", + "function": { + "name": "files_mv", + "arguments": "{\"reasoning\":[\"Having replaced 'Paris' with 'Edinburgh' in 'paris.txt', the final step is to rename the file to 'Edinburgh.txt'.\",\"This will complete the task as described by the user.\"],\"from\":\"paris.txt\",\"to\":\"Edinburgh.txt\"}" + } + } + ] + }, + { + "role": "tool", + "content": "", + "tool_call_id": "call_pklki7uYxadD9DXy0ASNloJN" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. 
If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. 
For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", 
+ "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. 
It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "I have successfully completed the task you described:\n\n1. Created a file named `paris.txt` with the content: \"I love to be in Paris in the Spring\".\n2. Replaced \"Paris\" with \"Edinburgh\" in the file content.\n3. Renamed the file from `paris.txt` to `Edinburgh.txt`.\n\nIf you need any more assistance or have further instructions, feel free to let me know!", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729382588, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1816, + "openAiProcessingMs": 1342, + "usage": { + "completion_tokens": 89, + "prompt_tokens": 2864, + "total_tokens": 2953, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2048 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/guts/__snapshots__/guts-longthread.ts.json b/_import-artifact/guts/__snapshots__/guts-longthread.ts.json new file mode 100644 index 000000000000..2527c126b4f3 --- /dev/null +++ b/_import-artifact/guts/__snapshots__/guts-longthread.ts.json @@ -0,0 +1,371 @@ +{ + "longthread: basic": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "say \"Hello\"", + "role": "user", + "name": "ai" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": 
"function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Hello!", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729382538, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 577, + "openAiProcessingMs": 339, + "usage": { + "completion_tokens": 3, + "prompt_tokens": 85, + "total_tokens": 88, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "longthread: chat": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "say the word: \"hello\" verbatim without calling any functions", + "role": "user", + "name": "ai" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + 
"required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "hello", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729382540, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 401, + "openAiProcessingMs": 187, + "usage": { + "completion_tokens": 2, + "prompt_tokens": 94, + "total_tokens": 96, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "say the word: \"hello\" verbatim without calling any functions", + "role": "user", + "name": "ai" + }, + { + "role": "assistant", + "content": "hello", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "what is your name ?", + "role": "user", + "name": "ai" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Assistant.", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729382541, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": 
"fp_482c22a7bc", + "duration": 391, + "openAiProcessingMs": 183, + "usage": { + "completion_tokens": 3, + "prompt_tokens": 116, + "total_tokens": 119, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "say the word: \"hello\" verbatim without calling any functions", + "role": "user", + "name": "ai" + }, + { + "role": "assistant", + "content": "hello", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "what is your name ?", + "role": "user", + "name": "ai" + }, + { + "role": "assistant", + "content": "Assistant.", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "repeat your last, without calling any functions", + "role": "user", + "name": "ai" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Assistant.", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729382542, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 467, + "openAiProcessingMs": 257, + "usage": { + "completion_tokens": 3, + 
"prompt_tokens": 142, + "total_tokens": 145, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/guts/__snapshots__/guts-tps-reports.ts.json b/_import-artifact/guts/__snapshots__/guts-tps-reports.ts.json new file mode 100644 index 000000000000..4a4bb9dbaf48 --- /dev/null +++ b/_import-artifact/guts/__snapshots__/guts-tps-reports.ts.json @@ -0,0 +1,1890 @@ +{ + "tps: run fixture test": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "run test ./tests/test-fixture.test.md", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_KGMa7arG8TQIqqv1fqTDqdqB", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the specified test file to understand what tests are described in it.\",\"Running a test typically involves reading the test file, understanding the setup and assertions, and then executing the test logic.\",\"Before proceeding to execute or simulate the tests, I should gather the required details from the file itself.\"],\"path\":\"./tests/test-fixture.test.md\"}" + } + } + ] + }, + "stats": { + "created": 1729382600, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1680, + "openAiProcessingMs": 1170, + "usage": { + "completion_tokens": 87, + "prompt_tokens": 2474, + "total_tokens": 2561, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "run test ./tests/test-fixture.test.md", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_KGMa7arG8TQIqqv1fqTDqdqB", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the specified test file to understand what tests are described in it.\",\"Running a test 
typically involves reading the test file, understanding the setup and assertions, and then executing the test logic.\",\"Before proceeding to execute or simulate the tests, I should gather the required details from the file itself.\"],\"path\":\"./tests/test-fixture.test.md\"}" + } + } + ] + }, + { + "role": "tool", + "content": "---\ntarget: agents/agent-fixture.md\nassessor: agents/test-assessor.md\n---\n\n## Call the local function\n\n**Prompts**\n\n- Call the local function\n\n**Expectations**\n\n- local function was called once\n- result was \"local reply\"\n\n## Fail a test\n\n**Prompts**\n\n- Call the local function\n\n**Expectations**\n\n- result was \"this expectation should fail\"\n\n## Throw an error\n\n**Prompts**\n\n- Call the error function\n\n**Expectations**\n\n- an error was thrown\n\n## Dependencies function\n\n**Dependencies**\n\n- Call the local function\n\n**Prompts**\n\n- say hello\n\n**Expectations**\n\n- hello was responded to\n- local function was called once\n\n## Multiple Dependencies\n\n**Dependencies**\n\n- Dependencies function\n- Call the local function\n\n**Prompts**\n\n- say goodbye\n\n**Expectations**\n\n- goodbye was responded to\n- local function was called twice\n- in between the two function calls, a \"hello\" was issued by the user\n", + "tool_call_id": "call_KGMa7arG8TQIqqv1fqTDqdqB" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/reasoner.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_rcXdAKhP2tt3r5UvqwxOM0X3", + "type": "function", + "function": { + "name": "o1-mini", + "arguments": "{\"reasoning\":[\"The file content indicates that there are multiple tests described, each with specific prompts and expectations.\",\"Understanding the logic behind these tests is crucial, particularly from a coding and test running standpoint.\",\"Analyzing the content to determine how these tests interrelate and should be executed will inform the steps needed to run the test suite effectively.\"],\"content\":\"We have a test file with several test cases defined. The tests include calling a local function, failing a test, throwing an error, testing dependencies, and handling multiple dependencies. \\n\\nThe tests are structured with prompts and expectations:\\n\\n1. **Call the local function**\\n - Expectations: The local function is called once and returns \\\"local reply\\\".\\n\\n2. **Fail a test**\\n - Expectations: The result should be \\\"this expectation should fail\\\".\\n\\n3. **Throw an error**\\n - Expectations: An error is thrown when the error function is invoked.\\n\\n4. **Dependencies function**\\n - Dependencies: This involves calling the local function.\\n - Expectations: A \\\"hello\\\" response should be given after a local function call.\\n\\n5. 
**Multiple Dependencies**\\n - Dependencies include: the Dependencies function and calling the local function.\\n - Expectations: A \\\"goodbye\\\" response should be given after two calls to the local function, with a \\\"hello\\\" in between.\\n\\nGiven this data, how should we best approach running these tests? What considerations should we have in terms of order of execution and dependencies, and how to ensure all expectations are met accurately?\"}" + } + } + ] + }, + "stats": { + "created": 1729382602, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 4655, + "openAiProcessingMs": 4419, + "usage": { + "completion_tokens": 343, + "prompt_tokens": 2779, + "total_tokens": 3122, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1792 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "run test ./tests/test-fixture.test.md", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_KGMa7arG8TQIqqv1fqTDqdqB", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"I need to read the content of the specified test file to understand what tests are described in it.\",\"Running a test typically involves reading the test file, understanding the setup and assertions, and then executing the test logic.\",\"Before proceeding to execute or simulate the tests, I should gather the required details from the file itself.\"],\"path\":\"./tests/test-fixture.test.md\"}" + } + } + ] + }, + { + "role": "tool", + "content": "---\ntarget: agents/agent-fixture.md\nassessor: agents/test-assessor.md\n---\n\n## Call the local function\n\n**Prompts**\n\n- Call the local function\n\n**Expectations**\n\n- local function was called 
once\n- result was \"local reply\"\n\n## Fail a test\n\n**Prompts**\n\n- Call the local function\n\n**Expectations**\n\n- result was \"this expectation should fail\"\n\n## Throw an error\n\n**Prompts**\n\n- Call the error function\n\n**Expectations**\n\n- an error was thrown\n\n## Dependencies function\n\n**Dependencies**\n\n- Call the local function\n\n**Prompts**\n\n- say hello\n\n**Expectations**\n\n- hello was responded to\n- local function was called once\n\n## Multiple Dependencies\n\n**Dependencies**\n\n- Dependencies function\n- Call the local function\n\n**Prompts**\n\n- say goodbye\n\n**Expectations**\n\n- goodbye was responded to\n- local function was called twice\n- in between the two function calls, a \"hello\" was issued by the user\n", + "tool_call_id": "call_KGMa7arG8TQIqqv1fqTDqdqB" + }, + { + "role": "assistant", + "content": null, + "name": "agents_reasoner_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_rcXdAKhP2tt3r5UvqwxOM0X3", + "type": "function", + "function": { + "name": "o1-mini", + "arguments": "{\"reasoning\":[\"The file content indicates that there are multiple tests described, each with specific prompts and expectations.\",\"Understanding the logic behind these tests is crucial, particularly from a coding and test running standpoint.\",\"Analyzing the content to determine how these tests interrelate and should be executed will inform the steps needed to run the test suite effectively.\"],\"content\":\"We have a test file with several test cases defined. The tests include calling a local function, failing a test, throwing an error, testing dependencies, and handling multiple dependencies. \\n\\nThe tests are structured with prompts and expectations:\\n\\n1. **Call the local function**\\n - Expectations: The local function is called once and returns \\\"local reply\\\".\\n\\n2. **Fail a test**\\n - Expectations: The result should be \\\"this expectation should fail\\\".\\n\\n3. 
**Throw an error**\\n - Expectations: An error is thrown when the error function is invoked.\\n\\n4. **Dependencies function**\\n - Dependencies: This involves calling the local function.\\n - Expectations: A \\\"hello\\\" response should be given after a local function call.\\n\\n5. **Multiple Dependencies**\\n - Dependencies include: the Dependencies function and calling the local function.\\n - Expectations: A \\\"goodbye\\\" response should be given after two calls to the local function, with a \\\"hello\\\" in between.\\n\\nGiven this data, how should we best approach running these tests? What considerations should we have in terms of order of execution and dependencies, and how to ensure all expectations are met accurately?\"}" + } + } + ] + }, + { + "role": "tool", + "content": "To effectively run your test suite and ensure that all expectations are met accurately, it's essential to consider the structure, dependencies, and order of execution of each test case. Below is a comprehensive approach to handling your tests:\n\n### 1. **Understand the Test Structure and Dependencies**\n\nFirst, let's summarize the tests and their dependencies:\n\n1. **Call the Local Function**\n - **Expectation:** The local function is called once and returns `\"local reply\"`.\n - **Dependencies:** None.\n\n2. **Fail a Test**\n - **Expectation:** The result should be `\"this expectation should fail\"`.\n - **Dependencies:** None.\n\n3. **Throw an Error**\n - **Expectation:** An error is thrown when the error function is invoked.\n - **Dependencies:** None.\n\n4. **Dependencies Function**\n - **Dependencies:** Calls the **Call the Local Function**.\n - **Expectation:** A `\"hello\"` response should be given after a local function call.\n\n5. 
**Multiple Dependencies**\n - **Dependencies:** \n - **Dependencies Function** (which in turn depends on **Call the Local Function**)\n - Calls the **Call the Local Function** again.\n - **Expectation:** A `\"goodbye\"` response should be given after two calls to the local function, with a `\"hello\"` in between.\n\n### 2. **Determine the Order of Execution**\n\nGiven the dependencies, it's crucial to execute the tests in an order that respects these relationships to avoid failures due to unmet dependencies. Here's a recommended order:\n\n1. **Call the Local Function**\n2. **Dependencies Function**\n3. **Multiple Dependencies**\n4. **Throw an Error**\n5. **Fail a Test**\n\n**Rationale:**\n\n- **Call the Local Function** should run first as it serves as a foundational test that other tests depend on.\n- **Dependencies Function** relies on the local function, so it should execute after the local function test.\n- **Multiple Dependencies** depends on both the Dependencies Function and another call to the local function, so it naturally follows.\n- **Throw an Error** and **Fail a Test** are independent of the others and can run afterward. Running them last ensures that their failure or exception handling doesn't interfere with the dependent tests.\n\n### 3. **Isolate Tests to Prevent Side Effects**\n\nTo ensure that each test runs in a controlled environment without unintended interactions:\n\n- **Use Setup and Teardown Methods:**\n - **Setup:** Initialize the necessary environment before each test. For example, mock functions or reset states.\n - **Teardown:** Clean up after each test to ensure that no residual data affects subsequent tests.\n\n- **Mock Dependencies:**\n - For tests that depend on functions like the local function, use mocking frameworks (e.g., Jest mocks) to simulate their behavior. This ensures that tests remain isolated and predictable.\n\n### 4. **Handle Assertions Accurately**\n\nEnsure that each test accurately verifies its expectations:\n\n1. 
**Call the Local Function**\n - **Assertion:** Verify that the local function is called exactly once.\n - **Return Value Assertion:** Confirm that it returns `\"local reply\"`.\n\n2. **Fail a Test**\n - **Assertion:** Intentionally set the expected result to `\"this expectation should fail\"`, which contradicts the actual outcome to simulate a failure scenario.\n\n3. **Throw an Error**\n - **Assertion:** Use assertion methods that expect an error to be thrown (e.g., `toThrow` in Jest).\n\n4. **Dependencies Function**\n - **Assertion:** After the local function call, verify that the response is `\"hello\"`.\n\n5. **Multiple Dependencies**\n - **Assertion:** Ensure that after two calls to the local function, with a `\"hello\"` in between, the final response is `\"goodbye\"`.\n\n### 5. **Leverage Testing Framework Features**\n\nUtilize features provided by modern testing frameworks to enhance your test suite:\n\n- **Test Suites and Grouping:**\n - Organize related tests into suites or groups to improve readability and maintainability.\n\n- **BeforeAll and AfterAll Hooks:**\n - For setup and teardown processes that are common across multiple tests.\n\n- **Parameterized Tests:**\n - If applicable, use parameterized tests to run the same test logic with different inputs.\n\n### 6. **Continuous Integration and Automated Testing**\n\nIntegrate your test suite into a CI/CD pipeline to ensure that tests run automatically with each code change, maintaining code quality and catching issues early.\n\n### 7. 
**Example Implementation Using Jest**\n\nHere's a conceptual example using Jest, a popular JavaScript testing framework:\n\n```javascript\n// localModule.js\nexport const localFunction = () => \"local reply\";\n\nexport const dependenciesFunction = () => {\n const reply = localFunction();\n return \"hello\";\n};\n\nexport const multipleDependenciesFunction = () => {\n const firstReply = localFunction();\n const secondReply = dependenciesFunction();\n const finalReply = localFunction();\n return \"goodbye\";\n};\n\nexport const errorFunction = () => {\n throw new Error(\"Intentional Error\");\n};\n```\n\n```javascript\n// localModule.test.js\nimport {\n localFunction,\n dependenciesFunction,\n multipleDependenciesFunction,\n errorFunction\n} from './localModule';\n\ndescribe('Local Module Tests', () => {\n\n beforeEach(() => {\n // Reset mocks or any state if necessary\n });\n\n test('Call the local function', () => {\n const reply = localFunction();\n expect(reply).toBe(\"local reply\");\n // If using spies to check call count:\n // const spy = jest.spyOn(module, 'localFunction');\n // expect(spy).toHaveBeenCalledTimes(1);\n });\n\n test('Dependencies function', () => {\n const reply = dependenciesFunction();\n expect(reply).toBe(\"hello\");\n });\n\n test('Multiple Dependencies', () => {\n const reply = multipleDependenciesFunction();\n expect(reply).toBe(\"goodbye\");\n });\n\n test('Throw an error', () => {\n expect(() => errorFunction()).toThrow(\"Intentional Error\");\n });\n\n test('Fail a test', () => {\n const result = \"some actual result\";\n expect(result).toBe(\"this expectation should fail\");\n });\n\n});\n```\n\n**Notes:**\n\n- **Spies and Mocks:** To assert that `localFunction` is called a specific number of times, you can use Jest's `jest.spyOn` or `jest.mock` features.\n \n ```javascript\n test('Call the local function once', () => {\n const spy = jest.spyOn(localModule, 'localFunction');\n const reply = localFunction();\n 
expect(spy).toHaveBeenCalledTimes(1);\n expect(reply).toBe(\"local reply\");\n spy.mockRestore();\n });\n ```\n\n- **Test Order:** Jest runs tests in the order they are defined within a `describe` block. Ensure that dependent tests are placed after their dependencies.\n\n- **Failing Tests:** Intentionally failing a test can be useful to ensure your test suite correctly identifies failures, but remember to update or remove such tests once they've served their purpose to avoid false negatives in your CI pipeline.\n\n### 8. **Best Practices Summary**\n\n- **Respect Dependencies:** Structure your tests to run dependent tests after their dependencies have been validated.\n- **Isolate Tests:** Ensure that tests do not interfere with each other by sharing state or side effects.\n- **Use Mocks and Spies:** Leverage mocking to simulate dependencies and verify interactions.\n- **Clear Assertions:** Make sure each test clearly and accurately asserts its expected outcome.\n- **Maintain Order:** While tests should ideally be independent, when dependencies exist, carefully manage the order of execution.\n- **Automate and Integrate:** Use CI/CD pipelines to run tests automatically and maintain code quality.\n\nBy following this structured approach, you can ensure that your test suite is robust, reliable, and maintains high code quality standards.", + "tool_call_id": "call_rcXdAKhP2tt3r5UvqwxOM0X3" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. 
The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. 
NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "To execute the test cases, you will need a suitable environment to simulate running these tests, such as using a JavaScript testing framework like Jest. Here’s a conceptual breakdown of how you can implement this with Jest:\n\n1. **Setup Your Test Environment**: Ensure you have Jest installed, and you're familiar with basic test case writing in it.\n\n2. **Implement the Functions for Testing**: Based on the descriptions provided, you will need functions that can be tested according to the specifications. 
Make sure to implement functions that match the names and behaviors expected by the test descriptions.\n\n3. **Write the Test Suite**: Within your Jest test file, you should write test cases for each described scenario. Here’s a basic setup:\n\n ```javascript\n // functions.js\n export function localFunction() {\n return \"local reply\";\n }\n\n export function functionThatThrows() {\n throw new Error(\"Error was thrown\");\n }\n\n // tests/functions.test.js\n import { localFunction, functionThatThrows } from './functions';\n\n describe('Local Function Tests', () => {\n test('Call the local function', () => {\n const result = localFunction();\n expect(result).toBe(\"local reply\");\n });\n\n test('Fail a test', () => {\n const result = localFunction();\n expect(result).toBe(\"this expectation should fail\");\n });\n\n test('Throw an error', () => {\n expect(() => functionThatThrows()).toThrow(\"Error was thrown\");\n });\n\n test('Dependencies function', () => {\n // Assuming dependencies function exists and calls localFunction\n const response = dependenciesFunction();\n // Mock the function call and check behaviors\n });\n\n test('Multiple Dependencies', () => {\n // Simulate calling multiple dependencies and checking return values\n });\n });\n ```\n\n4. **Run Your Tests**: Use the Jest command line interface to run your tests and check the output to ensure that each test behaves as expected. 
The failed test should fail intentionally, and the exception-throwing test should handle the exception as expected.\n\nWould you like detailed guidance on setting up these functions or executing them in a specific environment?", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729382620, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 6231, + "openAiProcessingMs": 5981, + "usage": { + "completion_tokens": 446, + "prompt_tokens": 4767, + "total_tokens": 5213, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2048 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/the_F6Q/napp-o1-mini-7": [ + { + "request": { + "model": "o1-mini", + "messages": [ + { + "content": "We have a test file with several test cases defined. The tests include calling a local function, failing a test, throwing an error, testing dependencies, and handling multiple dependencies. \n\nThe tests are structured with prompts and expectations:\n\n1. **Call the local function**\n - Expectations: The local function is called once and returns \"local reply\".\n\n2. **Fail a test**\n - Expectations: The result should be \"this expectation should fail\".\n\n3. **Throw an error**\n - Expectations: An error is thrown when the error function is invoked.\n\n4. **Dependencies function**\n - Dependencies: This involves calling the local function.\n - Expectations: A \"hello\" response should be given after a local function call.\n\n5. **Multiple Dependencies**\n - Dependencies include: the Dependencies function and calling the local function.\n - Expectations: A \"goodbye\" response should be given after two calls to the local function, with a \"hello\" in between.\n\nGiven this data, how should we best approach running these tests? 
What considerations should we have in terms of order of execution and dependencies, and how to ensure all expectations are met accurately?", + "role": "user", + "name": "act_H2R5R3HR4MCFYEB0" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "To effectively run your test suite and ensure that all expectations are met accurately, it's essential to consider the structure, dependencies, and order of execution of each test case. Below is a comprehensive approach to handling your tests:\n\n### 1. **Understand the Test Structure and Dependencies**\n\nFirst, let's summarize the tests and their dependencies:\n\n1. **Call the Local Function**\n - **Expectation:** The local function is called once and returns `\"local reply\"`.\n - **Dependencies:** None.\n\n2. **Fail a Test**\n - **Expectation:** The result should be `\"this expectation should fail\"`.\n - **Dependencies:** None.\n\n3. **Throw an Error**\n - **Expectation:** An error is thrown when the error function is invoked.\n - **Dependencies:** None.\n\n4. **Dependencies Function**\n - **Dependencies:** Calls the **Call the Local Function**.\n - **Expectation:** A `\"hello\"` response should be given after a local function call.\n\n5. **Multiple Dependencies**\n - **Dependencies:** \n - **Dependencies Function** (which in turn depends on **Call the Local Function**)\n - Calls the **Call the Local Function** again.\n - **Expectation:** A `\"goodbye\"` response should be given after two calls to the local function, with a `\"hello\"` in between.\n\n### 2. **Determine the Order of Execution**\n\nGiven the dependencies, it's crucial to execute the tests in an order that respects these relationships to avoid failures due to unmet dependencies. Here's a recommended order:\n\n1. **Call the Local Function**\n2. **Dependencies Function**\n3. **Multiple Dependencies**\n4. **Throw an Error**\n5. 
**Fail a Test**\n\n**Rationale:**\n\n- **Call the Local Function** should run first as it serves as a foundational test that other tests depend on.\n- **Dependencies Function** relies on the local function, so it should execute after the local function test.\n- **Multiple Dependencies** depends on both the Dependencies Function and another call to the local function, so it naturally follows.\n- **Throw an Error** and **Fail a Test** are independent of the others and can run afterward. Running them last ensures that their failure or exception handling doesn't interfere with the dependent tests.\n\n### 3. **Isolate Tests to Prevent Side Effects**\n\nTo ensure that each test runs in a controlled environment without unintended interactions:\n\n- **Use Setup and Teardown Methods:**\n - **Setup:** Initialize the necessary environment before each test. For example, mock functions or reset states.\n - **Teardown:** Clean up after each test to ensure that no residual data affects subsequent tests.\n\n- **Mock Dependencies:**\n - For tests that depend on functions like the local function, use mocking frameworks (e.g., Jest mocks) to simulate their behavior. This ensures that tests remain isolated and predictable.\n\n### 4. **Handle Assertions Accurately**\n\nEnsure that each test accurately verifies its expectations:\n\n1. **Call the Local Function**\n - **Assertion:** Verify that the local function is called exactly once.\n - **Return Value Assertion:** Confirm that it returns `\"local reply\"`.\n\n2. **Fail a Test**\n - **Assertion:** Intentionally set the expected result to `\"this expectation should fail\"`, which contradicts the actual outcome to simulate a failure scenario.\n\n3. **Throw an Error**\n - **Assertion:** Use assertion methods that expect an error to be thrown (e.g., `toThrow` in Jest).\n\n4. **Dependencies Function**\n - **Assertion:** After the local function call, verify that the response is `\"hello\"`.\n\n5. 
**Multiple Dependencies**\n - **Assertion:** Ensure that after two calls to the local function, with a `\"hello\"` in between, the final response is `\"goodbye\"`.\n\n### 5. **Leverage Testing Framework Features**\n\nUtilize features provided by modern testing frameworks to enhance your test suite:\n\n- **Test Suites and Grouping:**\n - Organize related tests into suites or groups to improve readability and maintainability.\n\n- **BeforeAll and AfterAll Hooks:**\n - For setup and teardown processes that are common across multiple tests.\n\n- **Parameterized Tests:**\n - If applicable, use parameterized tests to run the same test logic with different inputs.\n\n### 6. **Continuous Integration and Automated Testing**\n\nIntegrate your test suite into a CI/CD pipeline to ensure that tests run automatically with each code change, maintaining code quality and catching issues early.\n\n### 7. **Example Implementation Using Jest**\n\nHere's a conceptual example using Jest, a popular JavaScript testing framework:\n\n```javascript\n// localModule.js\nexport const localFunction = () => \"local reply\";\n\nexport const dependenciesFunction = () => {\n const reply = localFunction();\n return \"hello\";\n};\n\nexport const multipleDependenciesFunction = () => {\n const firstReply = localFunction();\n const secondReply = dependenciesFunction();\n const finalReply = localFunction();\n return \"goodbye\";\n};\n\nexport const errorFunction = () => {\n throw new Error(\"Intentional Error\");\n};\n```\n\n```javascript\n// localModule.test.js\nimport {\n localFunction,\n dependenciesFunction,\n multipleDependenciesFunction,\n errorFunction\n} from './localModule';\n\ndescribe('Local Module Tests', () => {\n\n beforeEach(() => {\n // Reset mocks or any state if necessary\n });\n\n test('Call the local function', () => {\n const reply = localFunction();\n expect(reply).toBe(\"local reply\");\n // If using spies to check call count:\n // const spy = jest.spyOn(module, 'localFunction');\n 
// expect(spy).toHaveBeenCalledTimes(1);\n });\n\n test('Dependencies function', () => {\n const reply = dependenciesFunction();\n expect(reply).toBe(\"hello\");\n });\n\n test('Multiple Dependencies', () => {\n const reply = multipleDependenciesFunction();\n expect(reply).toBe(\"goodbye\");\n });\n\n test('Throw an error', () => {\n expect(() => errorFunction()).toThrow(\"Intentional Error\");\n });\n\n test('Fail a test', () => {\n const result = \"some actual result\";\n expect(result).toBe(\"this expectation should fail\");\n });\n\n});\n```\n\n**Notes:**\n\n- **Spies and Mocks:** To assert that `localFunction` is called a specific number of times, you can use Jest's `jest.spyOn` or `jest.mock` features.\n \n ```javascript\n test('Call the local function once', () => {\n const spy = jest.spyOn(localModule, 'localFunction');\n const reply = localFunction();\n expect(spy).toHaveBeenCalledTimes(1);\n expect(reply).toBe(\"local reply\");\n spy.mockRestore();\n });\n ```\n\n- **Test Order:** Jest runs tests in the order they are defined within a `describe` block. Ensure that dependent tests are placed after their dependencies.\n\n- **Failing Tests:** Intentionally failing a test can be useful to ensure your test suite correctly identifies failures, but remember to update or remove such tests once they've served their purpose to avoid false negatives in your CI pipeline.\n\n### 8. 
**Best Practices Summary**\n\n- **Respect Dependencies:** Structure your tests to run dependent tests after their dependencies have been validated.\n- **Isolate Tests:** Ensure that tests do not interfere with each other by sharing state or side effects.\n- **Use Mocks and Spies:** Leverage mocking to simulate dependencies and verify interactions.\n- **Clear Assertions:** Make sure each test clearly and accurately asserts its expected outcome.\n- **Maintain Order:** While tests should ideally be independent, when dependencies exist, carefully manage the order of execution.\n- **Automate and Integrate:** Use CI/CD pipelines to run tests automatically and maintain code quality.\n\nBy following this structured approach, you can ensure that your test suite is robust, reliable, and maintains high code quality standards.", + "name": "agents/o1-mini.md", + "refusal": null + }, + "stats": { + "created": 1729382608, + "model": "o1-mini-2024-09-12", + "system_fingerprint": "fp_c70ad03337", + "duration": 11689, + "openAiProcessingMs": 11475, + "usage": { + "completion_tokens": 1962, + "prompt_tokens": 275, + "total_tokens": 2237, + "completion_tokens_details": { + "reasoning_tokens": 320 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/guts/guts-ai-isolates.ts b/_import-artifact/guts/guts-ai-isolates.ts new file mode 100644 index 000000000000..0e6bc1f399ac --- /dev/null +++ b/_import-artifact/guts/guts-ai-isolates.ts @@ -0,0 +1,165 @@ +import { assert, expect, log } from '@utils' +import { CradleMaker, getParent } from '@/constants.ts' +import * as longthread from '@/isolates/longthread.ts' +import * as system from '@/isolates/system.ts' + +const agent = ` +--- +commands: +- files:ls +- files:write +--- +` + +export default (cradleMaker: CradleMaker) => { + const prefix = 'isolates: ' + const actorId = 'test' + const path = 'agents/files.md' + + Deno.test(prefix + 'files:ls', 
async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const target = await backchat.threadPID() + await backchat.write(path, agent, target) + await backchat.write('tmp', '', target) + + const { run } = await backchat.actions('longthread', { + target, + }) + await t.step('ls', async () => { + const content = 'ls' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + log('result', result) + assert(result) + expect(typeof result.content).toBe('string') + expect(result.content).not.toContain('.io.json') + + log('thread', thread) + }) + await t.step('ls .', async () => { + const content = 'ls .' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + assert(result) + log('result', result) + + expect(typeof result.content).toBe('string') + expect(result.content).not.toContain('.io.json') + + log('thread', thread) + }) + await t.step('ls /', async () => { + const content = 'ls /' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + assert(result) + log('result', result) + + expect(typeof result.content).toBe('string') + expect(result.content).not.toContain('.io.json') + + log('thread', thread) + }) + await t.step('ls --all', async () => { + const content = 'ls --all' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + log('result', result) + assert(result) + + expect(typeof result.content).toBe('string') + expect(result.content).toContain('.io.json') + + log('thread', thread) + }) + await t.step('ls error', async () => { + const content = 'ls ./nonexistent' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + assert(result) + 
log('result', result) + + expect(typeof result.content).toBe('string') + expect(result.content).toContain('does not exist') + + log('thread', thread) + }) + }) + Deno.test(prefix + 'files:write', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + const target = await backchat.threadPID() + await backchat.write(path, agent, target) + + const { run } = await backchat.actions('longthread', { + target, + }) + + await t.step('write', async () => { + const content = 'write "c" to the file test.txt' + const result = await run({ path, actorId, content }) + log('result', result) + + const read = await backchat.read('test.txt', target) + expect(read).toBe('c') + }) + + await t.step('ls', async () => { + const content = 'ls' + await run({ path, actorId, content }) + const thread = await backchat.readThread(target) + const result = thread.messages.pop() + assert(result) + log('result', result) + + expect(typeof result.content).toBe('string') + expect(result.content).toContain('test.txt') + }) + }) + Deno.test(prefix + 'system:merge*', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const target = await backchat.threadPID() + const parent = getParent(target) + + const actions = await backchat.actions('system', { target }) + const { mergeParent, mergeGrandParent } = actions + + await t.step('mergeParent', async () => { + const result = await mergeParent({}) + log('result', result) + + expect(result).toBeDefined() + const [splice] = await backchat.splices(parent) + expect(result.head).toBe(splice.oid) + }) + await t.step('mergeGrandParent', async () => { + const result = await mergeGrandParent({}) + log('result', result) + + expect(result).toBeDefined() + const grandParent = getParent(getParent(target)) + const [splice] = await backchat.splices(grandParent) + expect(result.head).toBe(splice.oid) + }) + await t.step('mergeParent with conflicts', async () => { + 
await backchat.write('test.txt', 'parent', parent) + + await backchat.write('test.txt', 'target', target) + + const result = await mergeParent({}) + log('result', result) + + expect(result).toBeDefined() + const [splice] = await backchat.splices(parent) + expect(result.head).toBe(splice.oid) + }) + }) +} diff --git a/_import-artifact/guts/guts-backchats.ts b/_import-artifact/guts/guts-backchats.ts new file mode 100644 index 000000000000..913ead1990ff --- /dev/null +++ b/_import-artifact/guts/guts-backchats.ts @@ -0,0 +1,145 @@ +import { expect, log } from '@utils' +import { CradleMaker, generateBackchatId, print } from '@/constants.ts' +import { Crypto } from '../api/crypto.ts' +import { Backchat } from '../api/client-backchat.ts' +import { Api } from '@/isolates/io-fixture.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'backchats: ' + Deno.test(prefix + 'multi backchat', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { engine } = cradle + const key = Crypto.generatePrivateKey() + const backchat = await Backchat.upsert(engine, key) + log('pid', print(backchat.pid)) + + // TODO exercise the ACL blocking some actions to the session chain + await t.step('initial', async () => { + const { local } = await backchat.actions('io-fixture') + const result = await local({}) + expect(result).toEqual('local reply') + }) + const second = await Backchat.upsert(engine, key) + await t.step('second backchat', async () => { + const { local } = await second.actions('io-fixture') + const result = await local({}) + expect(result).toEqual('local reply') + }) + await t.step('cross backchat', async () => { + const opts = { target: backchat.pid } + const { local } = await second.actions('io-fixture', opts) + const result = await local({}) + expect(result).toEqual('local reply') + }) + + const resumed = await Backchat.upsert(engine, key, backchat.id) + await t.step('resume session', async () => { + // TODO this should check if the 
session is valid + expect(resumed.pid).toEqual(backchat.pid) + const { local } = await resumed.actions('io-fixture') + const result = await local({}) + expect(result).toEqual('local reply') + }) + await t.step('invalid session', async () => { + await expect(Backchat.upsert(engine, key, 'invalid')) + .rejects.toThrow('Invalid resume backchat id: invalid') + + const almost = generateBackchatId() + const next = await Backchat.upsert(engine, key, almost) + expect(next.id).not.toEqual(almost) + }) + }) + + Deno.test(prefix + 'internal requests', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'backchats/relay' + const { pid } = await backchat.init({ repo }) + + await t.step('ping', async () => { + const { branch } = await backchat.actions('io-fixture', { + target: pid, + }) + const result = await branch({}) + expect(result).toEqual('remote pong') + }) + }) + Deno.test(prefix + 'readTree', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const { pid } = backchat + + await t.step('empty path', async () => { + const path = '' + const result = await backchat.readTree(path, pid) + expect(result.length).toBeGreaterThan(0) + }) + await t.step('.', async () => { + const path = '.' 
+ const result = await backchat.readTree(path, pid) + expect(result.length).toBeGreaterThan(0) + }) + await t.step('tests', async () => { + const path = 'tests' + const result = await backchat.readTree(path, pid) + expect(result.length).toBeGreaterThan(0) + }) + await t.step('non existent', async () => { + const path = 'tests/non-existent' + await expect(backchat.readTree(path, pid)) + .rejects.toThrow('Could not find') + }) + }) + Deno.test(prefix + 'splice', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const { pid } = backchat + + await t.step('latest - 1', async () => { + const [splice] = await backchat.splices(pid) + const [same] = await backchat.splices(pid, { commit: splice.oid }) + expect(splice).toEqual(same) + + const commit = splice.commit.parent[0] + const [parent] = await backchat.splices(pid, { commit }) + expect(parent.oid).toEqual(commit) + }) + + await t.step('latest N', async () => { + const count = 20 + const splices = await backchat.splices(pid, { count }) + expect(splices.length).toEqual(count) + }) + }) + Deno.test(prefix + 'readBinary', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + await backchat.write('test.txt', 'binary test') + + await t.step('text decoding', async () => { + const binary = await backchat.readBinary('test.txt') + expect(binary.length).toBeGreaterThan(0) + expect(binary).toBeInstanceOf(Uint8Array) + + const text = new TextDecoder().decode(binary) + expect(text).toEqual('binary test') + }) + // TODO test error condition of file not found + }) + + Deno.test(prefix + 'readJSON', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const { pid } = backchat + + await t.step('latest', async () => { + const json = await backchat.readJSON('.io.json', pid) + expect(json).toHaveProperty('sequence') + }) + + await t.step('non existent', async () => { + await 
expect(backchat.readJSON('non-existent.json', pid)) + .rejects.toThrow('Could not find') + }) + }) +} diff --git a/_import-artifact/guts/guts-benchmarks.ts b/_import-artifact/guts/guts-benchmarks.ts new file mode 100644 index 000000000000..54d7198f9278 --- /dev/null +++ b/_import-artifact/guts/guts-benchmarks.ts @@ -0,0 +1,154 @@ +import { expect, log } from '@utils' +import { assert } from '@std/assert' +import { CradleMaker, IoStruct } from '@/constants.ts' +import 'benchmark' // load these modules into cache for ghactions +import { Api } from '@/isolates/io-fixture.ts' +import * as files from '@/isolates/files.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'benchmarks: ' + + Deno.test(prefix + 'resource hogging', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'benchmark/serial' + const { pid: target } = await backchat.init({ repo }) + const { local } = await backchat.actions('io-fixture', { target }) + + await t.step('serial', async () => { + const promises = [] + const count = 20 + for (let i = 0; i < count; i++) { + promises.push(local({})) + } + log('promises start') + const results = await Promise.all(promises) + for (const result of results) { + expect(result).toBe('local reply') + } + log('done') + + // TODO get historical splices and confirm depth of actions + }) + await backchat.rm({ repo }) + }) + Deno.test(prefix + 'resource hogging parallel', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'benchmark/parallel' + + const { pid: target } = await backchat.init({ repo }) + const { local } = await backchat.actions('io-fixture', { + target, + branch: true, + }) + await t.step('parallel', async () => { + const promises = [] + const count = 20 + for (let i = 0; i < count; i++) { + promises.push(local({})) + } + log('promises start') + const results = await Promise.all(promises) + for (const 
result of results) { + expect(result).toBe('local reply') + } + log('done') + }) + await t.step('io.json is blank', async () => { + const backchatIo = await backchat.readJSON('.io.json') + expect(Object.keys(backchatIo.requests)).toHaveLength(1) + expect(Object.keys(backchatIo.executed)).toHaveLength(0) + expect(Object.keys(backchatIo.replies)).toHaveLength(1) + expect(Object.keys(backchatIo.pendings)).toHaveLength(0) + expect(Object.keys(backchatIo.branches)).toHaveLength(0) + + const targetIo = await backchat.readJSON('.io.json', target) + + expect(Object.keys(targetIo.executed)).toHaveLength(0) + expect(Object.keys(targetIo.pendings)).toHaveLength(0) + expect(Object.keys(targetIo.branches)).toHaveLength(0) + // TODO verify there are no child branches of target remaining + }) + await backchat.rm({ repo }) + }) + Deno.test.ignore(prefix + 'flare', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'benchmark/flare' + await backchat.rm({ repo }) + const target = { + repoId: '0', + account: 't', + repository: 'flare', + branches: ['main'], + } + const { pid } = await backchat.init({ repo }) + expect(target).toEqual(pid) + const { parallel, squared } = await backchat.actions('io-fixture', { + target, + }) + + await t.step('flare', async () => { + const count = 50 + + const results = await parallel({ count }) + expect(results).toHaveLength(count) + assert(Array.isArray(results)) + for (const result of results) { + expect(result).toBe('local reply') + } + }) + await t.step('flare squared', async () => { + const count = 50 + const multiplier = 50 + + const results = await squared({ count, multiplier }) + expect(results).toHaveLength(multiplier) + assert(Array.isArray(results)) + for (const result of results) { + expect(result).toHaveLength(count) + assert(Array.isArray(result)) + for (const subresult of result) { + expect(subresult).toBe('local reply') + } + } + }) + }) + Deno.test.ignore(prefix + 
'records', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'benchmark/records' + await backchat.rm({ repo }) + const { pid: target } = await backchat.init({ repo }) + const count = 100 + + await t.step('touch', async () => { + const { touch } = await backchat.actions('io-fixture', { target }) + const prefix = 'cust-' + log('start') + await touch({ count, prefix, suffix: '.txt' }) + log('stop after:', count) + }) + await t.step('ls', async () => { + const { ls } = await backchat.actions('files', { target }) + const result = await ls({ count: true, reasoning: [] }) + log('result', result) + expect(result).toBe(count + 1) + }) + await t.step('update 1', async () => { + const path = `cust-${count - 1}.txt` + const content = 'this is the new content' + const { write } = await backchat.actions('files', { + target, + }) + await write({ reasoning: [], path, content }) + const result = await backchat.read(path, target) + log('contents', result) + expect(result).toEqual(content) + }) + + // then time how long it takes to write text to those files both at + // creation, and also afterwards. 
+ }) +} diff --git a/_import-artifact/guts/guts-branching.ts b/_import-artifact/guts/guts-branching.ts new file mode 100644 index 000000000000..862a4fdca19b --- /dev/null +++ b/_import-artifact/guts/guts-branching.ts @@ -0,0 +1,68 @@ +import { expect } from '@utils' +import { addBranches, CradleMaker } from '@/constants.ts' +import { Api } from '@/isolates/session.ts' +import { Api as IoApi } from '@/isolates/io-fixture.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'branching: ' + Deno.test(prefix + 'session', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + const repo = 'process/session' + + const { pid } = await backchat.init({ repo }) + const testBranch1Pid = addBranches(pid, 'session-1') + + const { create } = await backchat.actions('session', { target: pid }) + await t.step('create', async () => { + const branchPid = await create({ prefix: 'session' }) + expect(branchPid).toEqual(testBranch1Pid) + + const opts = { target: testBranch1Pid } + const { local } = await backchat.actions('io-fixture', opts) + const result = await local({}) + expect(result).toEqual('local reply') + }) + await t.step('second session', async () => { + const branchPid = await create({ prefix: 'session' }) + const testBranch3Pid = addBranches(pid, 'session-3') + expect(branchPid).toEqual(testBranch3Pid) + const opts = { target: testBranch3Pid } + const { local } = await backchat.actions('io-fixture', opts) + const result = await local({}) + expect(result).toEqual('local reply') + }) + }) + Deno.test(prefix + 'branch with ping', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'process/session' + + const { pid } = await backchat.init({ repo }) + + await t.step('ping', async () => { + const opts = { target: pid } + const { branch } = await backchat.actions('io-fixture', opts) + const result = await branch({}) + 
expect(result).toEqual('remote pong') + }) + }) + Deno.test(prefix + 'larger than 65k messages', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'process/session' + + const { pid } = await backchat.init({ repo }) + const ten = '0123456789' + const data = ten.repeat(100000) + expect(data.length).toBeGreaterThan(65535) + + await t.step('big branch', async () => { + const opts = { target: pid } + const { branch } = await backchat.actions('io-fixture', opts) + const result = await branch({ data }) + expect(result).toEqual(data) + }) + }) +} diff --git a/_import-artifact/guts/guts-focus.ts b/_import-artifact/guts/guts-focus.ts new file mode 100644 index 000000000000..37739fc6c203 --- /dev/null +++ b/_import-artifact/guts/guts-focus.ts @@ -0,0 +1,175 @@ +import { expect, log } from '@utils' +import { CradleMaker } from '@/constants.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'focus: ' + + // TODO make a remote thread, and then test summoner + + Deno.test(prefix + 'thread management', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + let focus = await backchat.threadPID() + log('initial focus', focus) + const thread = await backchat.readThread() + expect(thread.agent).toBe('agents/reasoner.md') + + await t.step('first thread, files agent', async () => { + expect(focus).toBeDefined() + await backchat.prompt('hello') + const next = await backchat.threadPID() + expect(next).toEqual(focus) + }) + + await t.step('second thread', async () => { + await backchat.prompt('start a new thread') + const next = await backchat.threadPID() + expect(next).not.toEqual(focus) + focus = next + }) + + await t.step('list files in second thread', async () => { + await backchat.prompt('list the files I have') + const next = await backchat.threadPID() + expect(next).toEqual(focus) + }) + + // TODO enable after providing thread search ability 
+ // await t.step('restart a thread', async () => { + // const result = await backchat.prompt('switch me back to the first thread') + // log('result', result) + // const next = await backchat.readBaseThread() + // expect(next).not.toEqual(focus) + // expect(next).toEqual(initialFocus) + // focus = next + // }) + + // test changing some files then have that show up on the other thread + + // TODO test deleted / nonexistent thread + }) + + Deno.test(prefix + 'update from github', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + await t.step('update', async () => { + await backchat.prompt( + 'Update the HAL repo to the latest version by using the system agent as an administrative action', + ) + }) + }) + + // TODO move this to be an md test + Deno.test(prefix + 'infinite loop regression', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + await t.step('infinite loop', async () => { + const prompt = + 'Write a file with the following text "I love to be in Paris in the Spring". Then save it as paris.txt. Then replace all text in that file where "Paris" occurs with "Edinburgh". 
Then rename the file Edinburgh.txt' + await backchat.prompt(prompt) + + const target = await backchat.threadPID() + + const listing = await backchat.ls({ target }) + expect(listing).toContain('Edinburgh.txt') + + const edinburgh = await backchat.read('Edinburgh.txt', target) + expect(edinburgh).toContain('I love to be in Edinburgh in the Spring') + }) + }) + + // Deno.test(prefix + 'double tool call with responses', async () => { + // const terminal = await cradleMaker() + // const { pid } = await terminal.init({ repo: 'test/doubleToolCall' }) + // const help = ` + // --- + // commands: + // - io-fixture:ping + // --- + + // ` + // await terminal.write('helps/double-test.md', help, pid) + // const isolate = 'engage-help' + // const { engage } = await terminal.actions(isolate, pid) + // const text = + // 'call the provided "ping" tool twice with the message being the integer "1" for the first one and the integer "2" for the second' + // const result = await engage({ help: 'double-test', text }) + // log('result', result) + + // const session = await terminal.readJSON(SESSION_PATH, pid) + // log('session', session) + + // expect(session).toHaveLength(5) + // const tool1 = session[2] + // const tool2 = session[3] + // expect(tool1.role).toBe('tool') + // expect(tool1.content).toBe('1') + // expect(tool2.role).toBe('tool') + // expect(tool2.content).toBe('2') + + // await terminal.engineStop() + // }) + // Deno.test(prefix + 'help in branch', async () => { + // const terminal = await cradleMaker() + // const { pid } = await terminal.init({ repo: 'test/helpInBranch' }) + // const help = ` + // --- + // commands: + // - helps/help-in-branch + // description: A test help used for exercising the system + // --- + // If you get asked to "Just say the number: 1" then you should respond with the number 1, otherwise you should do as requested. 
+ // ` + // await terminal.write('helps/help-in-branch.md', help, pid) + // const isolate = 'engage-help' + // const { engage } = await terminal.actions(isolate, pid) + // const text = + // 'call the "help-in-branch" function with: "Just say the number: 1"' + // const result = await engage({ help: 'help-in-branch', text }) + // log('result %s', result) + + // let session = await terminal.readJSON(SESSION_PATH, pid) + // log('session', session) + + // const branches = await terminal.readJSON(SESSION_BRANCHES, pid) + // log('branches', branches) + // expect(Object.values(branches)).toHaveLength(1) + // const [toolCallId, commit] = Object.entries(branches)[0] + // log('toolCallId', toolCallId) + // const helpPid = addBranches(pid, toolCallId) + // log('helpPid', print(helpPid)) + + // const branchSession = await terminal.readJSON( + // SESSION_PATH, + // helpPid, + // commit, + // ) + // log('branchSession', branchSession) + // expect(branchSession).toHaveLength(3) + // expect(branchSession[2].content).toBe('1') + + // await expect(terminal.readJSON(SESSION_PATH, helpPid)) + // .rejects + // .toThrow('HEAD not found') + + // session = await terminal.readJSON(SESSION_PATH, pid) + // log('session', session) + // expect(session).toHaveLength(5) + // const tool = session[3] + // expect(tool.role).toBe('tool') + // expect(tool.content).toBe('1') + // await terminal.engineStop() + // }) + + // use HAL to write a new prompt for HAL, and then use that ? + // use HAL to improve the goalie.json file, so next time it gets used as + // default + // PR the changed files against the users base defaults + // PR the users base defaults against the full HAL repo + + // really want to work on a branch of HAL from a safe place, where it is doing + // writes and reads to a fork, and testing things out in safety before + // changing its own programming. 
+} diff --git a/_import-artifact/guts/guts-git.ts b/_import-artifact/guts/guts-git.ts new file mode 100644 index 000000000000..4ff95819bf11 --- /dev/null +++ b/_import-artifact/guts/guts-git.ts @@ -0,0 +1,94 @@ +// the Grand Unified Test Suite™ + +import { expect, log } from '@utils' +import { Api, parameters } from '@/isolates/io-fixture.ts' +import { CradleMaker } from '@/constants.ts' +import { assert } from '@std/assert' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'git: ' + + Deno.test(prefix + 'io', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + log('start') + log('session created') + await t.step('ping empty', async () => { + const empty = await backchat.ping() + expect(empty).toEqual(undefined) + }) + log('ping done') + await t.step('ping with params', async () => { + const result = await backchat.ping({ data: { test: 'test' } }) + expect(result).toEqual({ test: 'test' }) + }) + log('params done') + }) + Deno.test(prefix + 'init', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + log('session complete') + const result = await backchat.init({ repo: 'test/init' }) + log('init result', result) + assert(result) + expect(result.pid).toBeDefined() + expect(result.pid.account).toBe('test') + expect(result.pid.repository).toBe('init') + expect(typeof result.head).toBe('string') + }) + Deno.test(prefix + 'rm', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const first = await backchat.rm({ repo: 'dreamcatcher-tech/HAL' }) + expect(first.reposDeleted).toBeFalsy() + + await backchat.init({ repo: 'dreamcatcher-tech/HAL' }) + const second = await backchat.rm({ repo: 'dreamcatcher-tech/HAL' }) + expect(second).toBeTruthy() + }) + Deno.test(prefix + 'clone', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + await 
t.step('clone', async () => { + await backchat.rm({ repo: 'dreamcatcher-tech/HAL' }) + const clone = await backchat.clone({ repo: 'dreamcatcher-tech/HAL' }) + // TODO read the fs and see what the state of the file system is ? + expect(clone.pid).toBeDefined() + expect(clone.pid.account).toBe('dreamcatcher-tech') + expect(clone.pid.repository).toBe('HAL') + expect(typeof clone.head).toBe('string') + }) + }) + Deno.test.ignore(prefix + 'child to self', async () => {}) + Deno.test.ignore(prefix + 'child to child', async () => {}) + Deno.test.ignore(prefix + 'child to parent', async () => {}) + Deno.test(prefix + 'pierce', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + await backchat.rm({ repo: 'cradle/pierce' }) + const { pid: target } = await backchat.init({ repo: 'cradle/pierce' }) + const actions = await backchat.actions('io-fixture', { target }) + await t.step('local', async () => { + const result = await actions.local({}) + log('local result', result) + expect(result).toBe('local reply') + }) + await t.step('second local', async () => { + const second = await actions.local({}) + expect(second).toBe('local reply') + }) + + await t.step('throws', async () => { + const message = 'test message' + await expect(actions.error({ message })).rejects.toThrow(message) + }) + await t.step('params fails validation', async () => { + const msg = 'Zod schema parameters validation error ' + const invalid = { message: true } as unknown as { message: string } + expect(() => parameters.ping.strict().parse(invalid)).toThrow() + await expect(actions.ping(invalid)) + .rejects.toThrow(msg) + }) + }) +} diff --git a/_import-artifact/guts/guts-longthread.ts b/_import-artifact/guts/guts-longthread.ts new file mode 100644 index 000000000000..a63495ad229c --- /dev/null +++ b/_import-artifact/guts/guts-longthread.ts @@ -0,0 +1,84 @@ +import { assert, expect, log } from '@utils' +import { CradleMaker, print } from '@/constants.ts' 
+import { Api } from '../isolates/longthread.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'longthread: ' + Deno.test(prefix + 'basic', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + log('pid', print(backchat.pid)) + + await t.step('prompt', async () => { + const { start, run } = await backchat.actions('longthread') + await start({}) + await run({ + path: 'agents/agent-fixture.md', + content: 'say "Hello"', + actorId: 'ai', + }) + const thread = await backchat.readThread(backchat.pid) + const assistant = thread.messages.pop() + assert(assistant) + expect(assistant.content).toContain('Hello') + }) + log('stopping') + // TODO test using the backchat thread function directly + }) + + Deno.test(prefix + 'chat', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + + const { start, run } = await backchat.actions('longthread') + await start({}) + + await t.step('say the word "hello"', async () => { + await run({ + path: 'agents/agent-fixture.md', + content: 'say the word: "hello" verbatim without calling any functions', + actorId: 'ai', + }) + const thread = await backchat.readThread(backchat.pid) + expect(thread.messages).toHaveLength(2) + + const assistant = thread.messages.pop() + assert(assistant) + expect(assistant.content).toBe('hello') + }) + + await t.step('what is your name ?', async () => { + await run({ + content: 'what is your name ?', + path: 'agents/agent-fixture.md', + actorId: 'ai', + }) + const thread = await backchat.readThread(backchat.pid) + expect(thread.messages).toHaveLength(4) + const assistant = thread.messages.pop() + assert(assistant) + + const expected = `Assistant` + expect(assistant.content).toContain(expected) + }) + + await t.step('repeat your last', async () => { + await run({ + content: 'repeat your last, without calling any functions', + path: 'agents/agent-fixture.md', + actorId: 'ai', + }) + const 
thread = await backchat.readThread(backchat.pid) + expect(thread.messages).toHaveLength(6) + const assistant = thread.messages.pop() + assert(assistant) + + const expected = `Assistant` + expect(assistant.content).toContain(expected) + }) + }) +} + +// add a user to the system +// add an anonymous user before they log in +// let the user log in to the system diff --git a/_import-artifact/guts/guts-splices.ts b/_import-artifact/guts/guts-splices.ts new file mode 100644 index 000000000000..08f5b39694d8 --- /dev/null +++ b/_import-artifact/guts/guts-splices.ts @@ -0,0 +1,108 @@ +import { assert, expect, log } from '@utils' +import { CradleMaker } from '@/constants.ts' +import { Api } from '@/isolates/io-fixture.ts' + +export default (cradleMaker: CradleMaker) => { + const prefix = 'splices: ' + Deno.test(prefix + 'files', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'splices/files' + const { pid } = await backchat.init({ repo }) + const { write } = await backchat.actions('io-fixture', { target: pid }) + + await t.step('read', async () => { + const p = write({ path: 'test', content: 'hello' }) + let first + for await (const splice of backchat.watch(pid, 'test')) { + log('splice', splice) + if (splice.changes['test']) { + first = splice + break + } + } + assert(first) + expect(first.pid).toEqual(pid) + expect(first.changes.test.patch).toEqual('hello') + expect(Object.keys(first.changes)).toHaveLength(1) + await p + }) + }) + Deno.test(prefix + '.io.json diffs', async (t) => { + // send in a bunch of actions and view the diffs as splices + + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'splices/diffs' + await backchat.rm({ repo }) + const { pid } = await backchat.init({ repo }) + const { write } = await backchat.actions('io-fixture', { target: pid }) + + const logger = async () => { + const stream = backchat.watch(pid, '.io.json') + // 
make a library that transforms splice streams + for await (const splice of stream) { + log('splice', splice.changes) + } + log('done') + } + logger() + + await t.step('read', async () => { + const p = write({ path: 'test', content: 'hello' }) + let first + for await (const splice of backchat.watch(pid, 'test')) { + if (splice.changes['test']) { + first = splice + break + } + } + assert(first) + expect(first.pid).toEqual(pid) + expect(Object.keys(first.changes)).toHaveLength(1) + await p + }) + }) + Deno.test(prefix + 'file changes', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const repo = 'splices/changes' + const { pid } = await backchat.init({ repo }) + + let fileSpliceCount = 0 + const fileSplices = async () => { + for await (const splice of backchat.watch(pid, 'test.txt')) { + log('file', splice.changes) + fileSpliceCount++ + } + } + fileSplices() + let spliceCount = 0 + const splices = async () => { + for await (const splice of backchat.watch(pid)) { + log('splice', splice.oid) + spliceCount++ + } + } + splices() + + await t.step('write', async () => { + const opts = { target: pid } + const { write } = await backchat.actions('io-fixture', opts) + await write({ path: 'test.txt', content: 'hello' }) + const p = write({ path: 'test.txt', content: 'ell' }) + let fileCount = 0 + for await (const _splice of backchat.watch(pid, 'test.txt')) { + fileCount++ + if (fileCount === 3) { + break + } + } + await p + }) + log('spliceCount', spliceCount) + log('fileSpliceCount', fileSpliceCount) + }) + + // do broadcast channel for partial writes occurring +} diff --git a/_import-artifact/guts/guts-tps-reports.ts b/_import-artifact/guts/guts-tps-reports.ts new file mode 100644 index 000000000000..374be201f7ee --- /dev/null +++ b/_import-artifact/guts/guts-tps-reports.ts @@ -0,0 +1,16 @@ +import { CradleMaker } from '@/constants.ts' +import { log } from '@utils' + +export default (cradleMaker: CradleMaker) => { + 
const prefix = 'tps: ' + + Deno.test(prefix + 'run fixture test', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + log('start') + + await t.step('run fixture', async () => { + await backchat.prompt('run test ./tests/test-fixture.test.md') + }) + }) +} diff --git a/_import-artifact/guts/guts.ts b/_import-artifact/guts/guts.ts new file mode 100644 index 000000000000..2456ea83424a --- /dev/null +++ b/_import-artifact/guts/guts.ts @@ -0,0 +1,24 @@ +// the Grand Unified Test Suite™ + +import branching from './guts-branching.ts' +import longthread from './guts-longthread.ts' +import splices from './guts-splices.ts' +import backchats from './guts-backchats.ts' +import focus from './guts-focus.ts' +import git from './guts-git.ts' +import tpsReports from './guts-tps-reports.ts' +import benchmarks from './guts-benchmarks.ts' +import isolates from './guts-ai-isolates.ts' +import { CradleMaker } from '@/constants.ts' + +export default (cradleMaker: CradleMaker) => { + branching(cradleMaker) + backchats(cradleMaker) + longthread(cradleMaker) + splices(cradleMaker) + focus(cradleMaker) + git(cradleMaker) + tpsReports(cradleMaker) + isolates(cradleMaker) + benchmarks(cradleMaker) +} diff --git a/_import-artifact/io/compartment.ts b/_import-artifact/io/compartment.ts new file mode 100644 index 000000000000..08551b68377d --- /dev/null +++ b/_import-artifact/io/compartment.ts @@ -0,0 +1,132 @@ +import { assert, Debug } from '@utils' +import { Isolate, Params, toApi } from '@/constants.ts' +import IA from '../isolate-api.ts' + +// deno has no dynamic runtime imports, so this is a workaround +import isolates from '../isolates/index.ts' +import { DispatchFunctions } from '@/constants.ts' +import { ZodSchema } from 'zod' + +const log = Debug('AI:compartment') +const cache = new Map() + +export default class Compartment { + #module: Isolate + #isolate: string + #check() { + assert(this.#module, 'code not loaded') + } + private 
constructor(isolate: string) { + log('load isolate:', isolate) + assert(isolates[isolate as keyof typeof isolates], `not found: ${isolate}`) + // TODO this is incredibly messy for types + this.#module = + isolates[isolate as keyof typeof isolates] as unknown as Isolate + this.#isolate = isolate + const { parameters, returns, functions } = this.#module + assert(typeof parameters === 'object', 'no parameters exported: ' + isolate) + assert(typeof returns === 'object', 'no returns exported: ' + isolate) + + zodCheck(parameters, returns, isolate) + assert(typeof functions === 'object', 'functions not exported: ' + isolate) + const missing = Object.keys(parameters).filter((key) => !functions[key]) + assert(!missing.length, `${isolate} Missing: ${missing.join(', ')}`) + } + static async create(isolate: string) { + if (!cache.has(isolate)) { + const compartment = new Compartment(isolate) + cache.set(isolate, compartment) + await Promise.resolve() // simulates loading from filesystem + } + const compartment = cache.get(isolate) + assert(compartment, 'compartment not found: ' + isolate) + return compartment + } + get api() { + this.#check() + return toApi(this.#module.parameters) + } + /** + * Mount the isolate as a side effect, and give it the chance to initialize + * some context that will get passed between different invocations on the + * same mount. 
+ * @param api : IsolateApi + * @returns Promise | void + */ + mount(api: IA) { + // TODO use exe to ensure that mount stops working arfter invocation + this.#check() + if (this.#module.lifecycles) { + if (typeof this.#module.lifecycles['@@mount'] === 'function') { + return this.#module.lifecycles['@@mount'](api) + } + } + } + /** + * Unmount the isolate as a side effect, and give it the chance to clean up + * @param api : IsolateApi + */ + unmount(api: IA) { + this.#check() + if (this.#module.lifecycles) { + if (typeof this.#module.lifecycles['@@unmount'] === 'function') { + return this.#module.lifecycles['@@unmount'](api) + } + } + } + functions(api: IA) { + this.#check() + const actions: DispatchFunctions = {} + for (const functionName in this.#module.parameters) { + actions[functionName] = this.#toFunction(functionName, api) + } + return actions as T + } + #toFunction(functionName: string, api: IA) { + return (parameters?: Params) => { + log('dispatch: %o', functionName) + if (parameters === undefined) { + parameters = {} + } + const schema = this.#module.parameters[functionName] + const path = this.#isolate + '/' + functionName + try { + schema.parse(parameters) + } catch (error) { + const message = error instanceof Error ? error.message : '' + throw new Error( + `Zod schema parameters validation error at path: ${path}\nError: ${message}`, + ) + } + return Promise.resolve( + this.#module.functions[functionName](parameters, api), + ) + .then((result) => { + const schema = this.#module.returns[functionName] + const parsed = schema.safeParse(result) + if (!parsed.success) { + throw new Error( + `Unrecoverable system error in ${path}. 
${parsed.error.message}`, + ) + } + return result + }) + } + } +} +const zodCheck = ( + parameters: Record, + returns: Record, + isolate: string, +) => { + assert(Object.keys(parameters).length, 'no api exported: ' + isolate) + assert( + Object.keys(returns).length === Object.keys(parameters).length, + 'parameters do not match returns: ' + isolate, + ) + + for (const key in parameters) { + assert(parameters[key] instanceof ZodSchema, 'invalid schema: ' + key) + assert(returns[key] instanceof ZodSchema, 'invalid schema: ' + key) + } +} diff --git a/_import-artifact/io/io-channel.test.ts b/_import-artifact/io/io-channel.test.ts new file mode 100644 index 000000000000..390ab058d362 --- /dev/null +++ b/_import-artifact/io/io-channel.test.ts @@ -0,0 +1,94 @@ +/** + * Designed to exercise the pending operations. + */ + +import { IoStruct, partialFromRepo } from '@/constants.ts' +import IOChannel from '@io/io-channel.ts' +import { Proctype } from '@/api/types.ts' +import { expect } from '@utils' + +const partial = partialFromRepo('system/system') +const pid = { ...partial, repoId: 'rep_R64KH614QH4TDW2V' } +Deno.test('io-channel', () => { + const json = serialAccumulation() + const io = IOChannel.readObject(json, pid) + expect(io.isExecutionAvailable()).toBeTruthy() + const { request, sequence } = io.setExecution() + const executing = io.getRunnableExecution() + const outbound = json.requests[1] + expect(executing).toEqual(outbound) + expect(sequence).toBe(1) + expect(request).toEqual(outbound) +}) +Deno.test('remote requests', () => { + const json = serialAccumulation() + json.requests[1].target.branches = ['main', 'other'] + const io = IOChannel.readObject(json, pid) + expect(io.isExecutionAvailable()).toBeFalsy() +}) + +// refuse to run unless all of a pending layer is solved + +// test writing files then doing some accumulation requests, and ensuring that +// those files are present accurately + +// if multiple requests are eligible, its always the lowest sequence 
number + +const serialAccumulation: () => IoStruct = () => ({ + executed: { 0: true }, + sequence: 2, + requests: { + 0: { + target: { + repoId: 'rep_R64KH614QH4TDW2V', + account: 'system', + repository: 'system', + branches: ['main'], + }, + ulid: '01HV3NRRWC4NQVZ5PEKD8XWBMY', + isolate: 'backchat', + functionName: 'pierce', + params: { + request: { + target: { + repoId: 'rep_R64KH614QH4TDW2V', + account: 'system', + repository: 'system', + branches: ['main'], + }, + isolate: 'repo', + functionName: 'rm', + params: { repo: 'test/test' }, + proctype: Proctype.enum.SERIAL, + }, + }, + proctype: Proctype.enum.SERIAL, + }, + 1: { + target: { + repoId: 'rep_R64KH614QH4TDW2V', + account: 'system', + repository: 'system', + branches: ['main'], + }, + isolate: 'repo', + functionName: 'rm', + params: { repo: 'test/test' }, + proctype: Proctype.enum.SERIAL, + sequence: 1, + source: { + repoId: 'rep_R64KH614QH4TDW2V', + account: 'system', + repository: 'system', + branches: ['main'], + }, + }, + }, + replies: {}, + parents: {}, + pendings: { + 0: [{ commit: '1222aa01861192073e2a02c803767f75574ff5be', sequences: [1] }], + }, + branches: {}, + state: {}, +}) diff --git a/_import-artifact/io/io-channel.ts b/_import-artifact/io/io-channel.ts new file mode 100644 index 000000000000..5ee25fc84580 --- /dev/null +++ b/_import-artifact/io/io-channel.ts @@ -0,0 +1,470 @@ +/** + * Manages the .io.json file + */ + +import { assert, equal } from '@utils' +import { + IoStruct, + ioStruct, + isMergeReply, + isPierceRequest, + isRemoteRequest, + MergeReply, + PID, + Proctype, + RemoteRequest, + Request, + SolidReply, + SolidRequest, + UnsequencedRequest, +} from '@/constants.ts' +import Accumulator from '@/exe/accumulator.ts' +import FS from '@/git/fs.ts' +import { IsolatePromise } from '@/constants.ts' + +const createBase = () => + ioStruct.parse({ + sequence: 0, + requests: {}, + executed: {}, + replies: {}, + parents: {}, + pendings: {}, + branches: {}, + state: {}, + }) + +export 
default class IOChannel { + readonly #io: IoStruct + readonly #fs: FS | undefined + readonly #pid: PID + #original: IoStruct + private constructor(io: IoStruct, pid: PID, fs?: FS, noPurge?: boolean) { + this.#pid = pid + this.#io = io + if (!noPurge) { + this.#blankSettledRequests() + } + if (fs) { + // TODO remove the fs item completely - handle fs outside + assert(equal(pid, fs.pid), 'pid mismatch') + } + this.#fs = fs + // TODO use immer or similar to avoid this parsing step + this.#original = structuredClone(this.#io) + } + static readObject(io: IoStruct, pid: PID) { + check(io, pid) + return new IOChannel(io, pid) + } + + static async read(fs: FS) { + if (await fs.exists('.io.json')) { + const io = await fs.readJSON('.io.json') + check(io, fs.pid) + return new IOChannel(io, fs.pid) + } + } + static load(fs: FS) { + return IOChannel.#load(fs) + } + static loadWithoutPurge(fs: FS) { + const noPurge = true + return IOChannel.#load(fs, noPurge) + } + static async #load(fs: FS, noPurge?: boolean) { + // TODO ensure this is cached + let io = createBase() + + if (await fs.exists('.io.json')) { + io = await fs.readJSON('.io.json') + check(io, fs.pid) + } + const channel = new IOChannel(io, fs.pid, fs, noPurge) + return channel + } + static blank(fs: FS) { + const io = new IOChannel(createBase(), fs.pid, fs) + io.#save() + } + get state() { + return this.#io.state + } + set state(value: IoStruct['state']) { + this.#io.state = value + } + #save() { + if (!this.#fs) { + throw new Error('no filesystem to save to') + } + return this.#fs.writeJSON('.io.json', this.#io) + } + save() { + if (equal(this.#io, this.#original)) { + throw new Error('no changes to save') + } + // TODO make save a one shot thing + this.#original = structuredClone(this.#io) + return this.#save() + } + isExecution(attempt: SolidRequest) { + const next = this.getRunnableExecution() + return equal(next, attempt) + } + isExecutionAvailable() { + if (this.#io.executing !== undefined) { + return false + } 
+ return !!this.#nextExecution() + } + #nextExecution() { + if (this.#io.executing !== undefined) { + throw new Error('execution already set') + } + const unreplied = Object.keys(this.#io.requests) + .map((key) => parseInt(key)) + .filter((k) => !this.#io.replies[k]) + .filter((sequence) => !this.#io.executed[sequence]) + .sort((a, b) => a - b) + + for (const sequence of unreplied) { + const rawRequest = this.#io.requests[sequence] + if (rawRequest.proctype !== Proctype.enum.SERIAL) { + continue + } + if (!equal(rawRequest.target, this.#pid)) { + continue + } + if (!this.#io.pendings[sequence]) { + const request = toRunnableRequest(rawRequest, sequence) + return { request, sequence } + } + const pendings = this.#io.pendings[sequence] + const lastLayer = pendings[pendings.length - 1] + if (lastLayer.sequences.every((sequence) => this.isSettled(sequence))) { + const request = toRunnableRequest(rawRequest, sequence) + return { request, sequence } + } + } + } + setExecution() { + const next = this.#nextExecution() + if (!next || !next.request || next.sequence === undefined) { + throw new Error('no execution action available') + } + const { request, sequence } = next + this.#io.executing = sequence + this.#io.executed[sequence] = true + return { request, sequence } + } + getRunnableExecution() { + const { request, sequence } = this.#getExecution() + return toRunnableRequest(request, sequence) + } + getExecution() { + const { request, sequence } = this.#getExecution() + let commit = undefined + const runnable = toRunnableRequest(request, sequence) + if (isRemoteRequest(request)) { + commit = request.commit + } + return { runnable, commit } + } + #getExecution() { + if (this.#io.executing === undefined) { + throw new Error('no execution action set') + } + const sequence = this.#io.executing + assert(sequence in this.#io.requests, 'execution sequence not found') + const request = this.#io.requests[sequence] + return { request, sequence } + } + getSequence(request: 
SolidRequest) { + for (const [key, value] of Object.entries(this.#io.requests)) { + // TODO is it possible to have duplicate entries here ? + // ideally this function would never get called + const test = toRunnableRequest(value, Number.parseInt(key)) + if (equal(test, request)) { + return Number.parseInt(key) + } + } + throw new Error('request not found') + } + getRequest(sequence: number) { + assert(sequence in this.#io.requests, 'sequence not found: ' + sequence) + + return toRunnableRequest(this.#io.requests[sequence], sequence) + } + getOutcome(sequence: number) { + assert(sequence in this.#io.replies, 'sequence not found') + return this.#io.replies[sequence] + } + getOutcomeBySource(requestSource: PID, sequence: number) { + for (const key in this.#io.requests) { + const request = this.#io.requests[key] + if (isRemoteRequest(request)) { + if ( + equal(request.source, requestSource) && request.sequence === sequence + ) { + return this.#io.replies[Number.parseInt(key)] + } + } + } + throw new Error('sequence not found: ' + sequence) + } + reply(reply: MergeReply | SolidReply) { + const { sequence } = reply + assert(Number.isInteger(sequence), 'reply needs a sequence number') + assert(sequence >= 0, 'reply needs a whole sequence number') + assert(!this.isSettled(sequence), 'sequence already settled: ' + sequence) + + const request = this.#io.requests[sequence] + assert(request, `reply sequence not found: ${sequence}`) + assert(!this.#io.replies[sequence], 'sequence already replied') + this.#io.replies[sequence] = reply.outcome + if (isMergeReply(reply)) { + this.#io.parents[sequence] = reply.commit + } + const pendingsToBlank = [] + if (!this.#isAccumulation(request)) { + const pendings = this.#io.pendings[sequence] + if (pendings) { + for (const layer of pendings) { + for (const sequence of layer.sequences) { + assert(this.isSettled(sequence), 'layer sequence not settled') + pendingsToBlank.push(sequence) + } + } + } + delete this.#io.pendings[sequence] + if 
(request.proctype !== Proctype.enum.DAEMON) { + delete this.#io.branches[sequence] + } + } + for (const key of pendingsToBlank) { + const request = this.#io.requests[key] + if (request.proctype !== Proctype.enum.DAEMON) { + delete this.#io.branches[key] + } + delete this.#io.requests[key] + delete this.#io.replies[key] + delete this.#io.parents[key] + delete this.#io.pendings[key] + delete this.#io.executed[key] + } + if (this.#io.executing === sequence) { + delete this.#io.executing + } + delete this.#io.executed[sequence] + + return request + } + getAccumulator(fs: FS): Accumulator { + const indices: number[] = [] + const commits: string[] = [] + + const origin = this.getRunnableExecution() + assert(origin, 'no serial request found') + const sequence = this.getSequence(origin) + const pendings = this.#io.pendings[sequence] + if (pendings) { + for (const layer of pendings) { + for (const sequence of layer.sequences) { + assert(this.isSettled(sequence), 'layer sequence not settled') + indices.push(sequence) + commits.push(layer.commit) + } + } + } + const accumulations: IsolatePromise[] = [] + for (const index of indices) { + const saved = this.#io.requests[index] + assert(!isPierceRequest(saved), 'pierce request cannot accumulate') + const request = toUnsequenced(saved) + const outcome = this.#io.replies[index] + const parent = this.#io.parents[index] + const commit = commits.shift() + const result: IsolatePromise = { request, outcome, commit, parent } + accumulations.push(result) + } + return Accumulator.create(fs, accumulations) + } + isSettled(sequence: number) { + assert(this.#io.sequence > sequence, 'sequence not yet invoked') + if (!this.#io.requests[sequence]) { + return true + } + if (this.#io.replies[sequence]) { + return true + } + return false + } + isPendingIncluded(sequence: number, commit: string) { + const pendings = this.#io.pendings[sequence] + if (!pendings) { + return false + } + return pendings.some((pending) => pending.commit === commit) + } + 
getBranchPid(sequence: number) { + const request = this.getRequest(sequence) + const isBranch = request.proctype === Proctype.enum.BRANCH || + request.proctype === Proctype.enum.DAEMON + assert(isBranch, 'not a branch request') + + let branchName = sequence + '' + if (request.branchName) { + assert(!request.branchPrefix, 'cannot have both branch and branchPrefix') + branchName = request.branchName + } + if (request.branchPrefix) { + assert(!request.branch, 'cannot have both branch and branchPrefix') + branchName = request.branchPrefix + '-' + sequence + } + const parentPid = this.#pid + const branches = [...parentPid.branches, branchName] + const pid = { ...parentPid, branches } + return pid + } + addPending(sequence: number, commit: string, requests: UnsequencedRequest[]) { + assert(!this.isSettled(sequence), 'sequence already settled') + assert(!this.isPendingIncluded(sequence, commit), 'commit already included') + assert(this.#io.executing === sequence, 'sequence not executing') + + const sequences = [] + const solidified: (SolidRequest | RemoteRequest)[] = [] + for (const request of requests) { + const { sequence, sequenced } = this.#addUnsequenced(request) + sequences.push(sequence) + solidified.push(sequenced) + } + if (!this.#io.pendings[sequence]) { + this.#io.pendings[sequence] = [] + } + const pendings = this.#io.pendings[sequence] + if (pendings.length) { + const lastLayer = pendings[pendings.length - 1] + const allSettled = lastLayer.sequences + .every((sequence) => this.isSettled(sequence)) + assert(allSettled, 'all sequences must be settled') + } + + pendings.push({ commit, sequences }) + delete this.#io.executed[sequence] + delete this.#io.executing + return solidified + } + #addUnsequenced(request: UnsequencedRequest) { + const sequence = this.#io.sequence++ + const source = this.#pid + const sequenced: SolidRequest = { ...request, sequence, source } + this.#addRequest(sequenced, sequence) + return { sequence, sequenced } + } + addRequest(request: 
Request) { + const sequence = this.#io.sequence++ + this.#addRequest(request, sequence) + return sequence + } + #addRequest(request: Request, sequence: number) { + this.#io.requests[sequence] = request + if ( + request.proctype === Proctype.enum.DAEMON || + request.proctype === Proctype.enum.BRANCH + ) { + if (equal(request.target, this.#pid)) { + const pid = this.getBranchPid(sequence) + this.#io.branches[sequence] = pid.branches[pid.branches.length - 1] + } + } + } + /** An accumulation is an action sourced from this branch */ + #isAccumulation(request: Request) { + if (isPierceRequest(request)) { + return false + } + if (equal(this.#pid, request.source)) { + return true + } + return false + } + #blankSettledRequests() { + const toBlank = [] + for (const key in this.#io.replies) { + if (!this.#isAccumulation(this.#io.requests[key])) { + toBlank.push(parseInt(key)) + } + } + for (const key of toBlank) { + delete this.#io.requests[key] + delete this.#io.replies[key] + delete this.#io.parents[key] + } + } +} + +const parsedMap = new WeakMap() + +const check = (io: IoStruct, thisPid: PID) => { + // TODO move this to zod schema with refine + // TODO check key sequences are sane + // TODO do the same for reply values + if (!parsedMap.has(io)) { + // can use weakmap since at runtime, the object is typesafe, so can mutate + ioStruct.parse(io) + const isParsed = true + parsedMap.set(io, isParsed) + } + + for (const replyKey of Object.keys(io.replies)) { + assert(replyKey in io.requests, 'no reply key in requests') + } + for (const request of Object.values(io.requests)) { + if (!equal(request.target, thisPid)) { + // TODO if move PID to be internal, then can cache better + assert(!isPierceRequest(request), 'target pid mismatch - pierce') + assert(equal(request.source, thisPid), 'target pid mismatch - acc') + } + } +} + +const cache = new WeakMap>() +const toRunnableRequest = (request: Request, sequence: number) => { + // TODO remove this function completely - translation 
is bad + if (cache.has(request)) { + const sequences = cache.get(request) + if (sequences?.has(sequence)) { + const request = sequences.get(sequence) + assert(request, 'request not found: ' + sequence) + return request + } + } + if (!isPierceRequest(request)) { + return request + } + const { isolate, functionName, params, proctype, target } = request + const internal: SolidRequest = { + isolate, + functionName, + params, + proctype, + source: target, + target, + sequence, + } + if (!cache.has(request)) { + cache.set(request, new Map()) + } + cache.get(request)?.set(sequence, internal) + return internal +} +export const toUnsequenced = ( + request: SolidRequest | RemoteRequest, +): UnsequencedRequest => { + if (isRemoteRequest(request)) { + const { sequence: _, source: __, commit: ___, ...unsequenced } = request + return unsequenced + } + const { sequence: _, source: __, ...unsequenced } = request + return unsequenced +} diff --git a/_import-artifact/io/io.ts b/_import-artifact/io/io.ts new file mode 100644 index 000000000000..01a8a1da1cf4 --- /dev/null +++ b/_import-artifact/io/io.ts @@ -0,0 +1,104 @@ +import { Debug } from '@utils' +import { solidify } from '@/git/solidify.ts' +import { branch } from '@/git/branch.ts' +import { Pending, PID, Pierce, SolidReply, Solids } from '@/constants.ts' +import DB from '@/db.ts' +import FS from '@/git/fs.ts' +import { Atomic } from '@/atomic.ts' +import { ExeResult } from '@/constants.ts' +const log = Debug('AI:io') + +/** + * Inducting is the act of pulling in this poolable and any others that are in + * the pool. If this operation fails due to head mismatch, we can assume that + * the poolable has been processed since any competing process would have + * gathered all the poolable items. This is a form of optimistic concurrency. + * + * If a reply is supplied, then we MUST induct, since we have changed files on + * the fs that need to be included in the commit. 
+ */ +export const doAtomicCommit = async (db: DB, fs: FS, exe?: ExeResult) => { + let pending: Pending | undefined + let reply: SolidReply | undefined + if (exe) { + fs.copyChanges(exe.fs) + if ('reply' in exe) { + reply = exe.reply + } else { + pending = exe.pending + } + // if this request is an internal artifact level request, we need to remove + // the repo lock atomically along with doing the commit to say we're done + } + + const { poolKeys, pool } = await db.getPooledActions(fs.pid) + + if (!pool.length && !pending && !reply) { + log('no pool or pending requests') + return false + } + + const solids = await solidify(fs, pool, reply, pending) + const atomic = db.atomic() + atomic.deletePool(fs.pid, poolKeys) + return commit(atomic, solids, fs) +} + +export const doAtomicPierce = async (db: DB, fs: FS, pierce: Pierce) => { + const solids = await solidify(fs, [pierce]) + const atomic = db.atomic() + return commit(atomic, solids, fs) +} + +const commit = async (atomic: Atomic, solids: Solids, fs: FS) => { + // the moneyshot + const headChanged = await atomic.updateHead(fs.pid, fs.oid, solids.oid) + if (!headChanged) { + log('head changed from %o missed %o', fs.oid, solids.oid) + return false + } + transmit(fs.pid, solids, atomic) + const deletionsOk = await deleteBranches(solids.deletes, atomic) + if (!deletionsOk) { + return false + } + const success = await atomic.commit() + log('commit success %o from %o to %o', success, fs.oid, solids.oid) + return success +} + +const transmit = (pid: PID, solids: Solids, atomic: Atomic) => { + const { oid, exe, branches, poolables } = solids + + if (exe) { + const { request, sequence } = exe + atomic.enqueueExecution(request.target, sequence, oid) + } + for (const sequence of branches) { + atomic.enqueueBranch(oid, pid, sequence) + } + for (const poolable of poolables) { + atomic.addToPool(poolable) + } +} +const deleteBranches = async (deletes: Solids['deletes'], atomic: Atomic) => { + const promises = [] + for (const { 
pid, commit } of deletes) { + promises.push(atomic.deleteBranch(pid, commit)) + } + const results = await Promise.all(promises) + if (results.some((r) => !r)) { + return false + } + return true +} +export const doAtomicBranch = async (db: DB, fs: FS, sequence: number) => { + const atomic = db.atomic() + const { pid, head, origin } = await branch(fs, sequence) + atomic.createBranch(pid, head) + const originSequence = 0 + atomic.enqueueExecution(origin.target, originSequence, head) + const success = await atomic.commit() + log('branch success %o from %o to %o', success, fs.oid, head) + return success +} diff --git a/_import-artifact/isolate-api.ts b/_import-artifact/isolate-api.ts new file mode 100644 index 000000000000..7429443659f9 --- /dev/null +++ b/_import-artifact/isolate-api.ts @@ -0,0 +1,326 @@ +import { deserializeError } from 'serialize-error' +import Accumulator from './exe/accumulator.ts' +import Compartment from './io/compartment.ts' +import { assert, Debug, equal } from '@utils' +import micromatch from 'micromatch' +import { + ApiFunctions, + DispatchFunctions, + freezePid, + getThreadPath, + IoStruct, + isChildOf, + isSettledIsolatePromise, + META_SYMBOL, + MetaPromise, + PID, + print, + PromisedIsolatePromise, + RpcOpts, + SolidRequest, + threadSchema, + toActions, + UnsequencedRequest, +} from '@/constants.ts' +import { type Isolate } from '@/isolates/index.ts' +import * as files from '@/isolates/files.ts' +import IOChannel from '@io/io-channel.ts' +import { z, ZodTypeAny } from 'zod' +const log = Debug('AI:isolateApi') +interface Default { + [key: string]: unknown +} +type EffectOptions = { + isEffect: boolean + isEffectRecovered: boolean +} + +export default class IA { + #accumulator: Accumulator + #origin: SolidRequest + #originCommit: string | undefined + // TODO assign a mount id for each side effect execution context ? 
+ #context: Partial = {} + #isEffect = false + #isEffectRecovered = false + #abort = new AbortController() + private constructor( + accumulator: Accumulator, + origin: SolidRequest, + originCommit?: string, + ) { + this.#accumulator = accumulator + this.#origin = origin + this.#originCommit = originCommit + } + static create( + accumulator: Accumulator, + origin: SolidRequest, + originCommit?: string, + opts?: EffectOptions, + ) { + const api = new IA(accumulator, origin, originCommit) + if (opts) { + api.#isEffect = opts.isEffect || false + api.#isEffectRecovered = opts.isEffectRecovered || false + } + return api + } + static createContext() { + // TODO find a more graceful way to do this for cradle setup + return new IA( + null as unknown as Accumulator, + null as unknown as SolidRequest, + ) + } + get #fs() { + return this.#accumulator.fs + } + get pid() { + return this.#fs.pid + } + get origin() { + return this.#origin + } + /** The commit from the origin action */ + get originCommit() { + return this.#originCommit + } + get commit() { + return this.#fs.oid + } + /** If this execution is side effect capable. May extend to get permissions + * information */ + get isEffect() { + return this.#isEffect + } + /** If the side effect lock was broken in order to start this instance. 
+ * Implies the previous executing instance was aborted */ + get isEffectRecovered() { + return this.#isEffectRecovered + } + /** Side effects can listen to this signal to abort their activities */ + get signal() { + assert(this.#accumulator.isActive, 'Activity is denied') + // return an abort signal + assert(this.isEffect, 'signal only available for side effects') + return this.#abort.signal + } + // TODO make get and set config be synchronous + async state>>( + schema: T, + fallback: z.infer, + ) { + assert(this.#accumulator.isActive, 'Activity is denied') + const io = await IOChannel.read(this.#fs) + assert(io, 'io not found') + if (equal(io.state, {})) { + return schema.parse(fallback) as z.infer + } + return schema.parse(io.state) as z.infer + } + async updateState>>( + updater: (state: z.infer) => z.infer, + schema: T, + ) { + assert(this.#accumulator.isActive, 'Activity is denied') + const io = await IOChannel.load(this.#fs) + assert(io, 'io not found') + const state = io.state as z.infer + const result = updater(state) + if (!equal(state, result)) { + const next = schema.parse(result) as z.infer + assert(io, 'io not found') + io.state = next + io.save() + } + } + + async actions(isolate: Isolate, opts: RpcOpts = {}) { + const { target = this.pid, ...procOpts } = opts + freezePid(target) + const schema = await this.apiSchema(isolate) + const execute = (request: UnsequencedRequest) => this.action(request) + return toActions(target, isolate, schema, procOpts, execute) + } + async requests(isolate: Isolate, opts: RpcOpts = {}) { + const { target = this.pid, ...procOpts } = opts + freezePid(target) + + type Unseq = { + [K in keyof T]: (...args: Parameters) => UnsequencedRequest + } + + const schema = await this.apiSchema(isolate) + const execute = (request: UnsequencedRequest) => request + return toActions(target, isolate, schema, procOpts, execute) + } + action(request: UnsequencedRequest) { + const recovered = this.#accumulator.recover(request) + if 
(recovered) { + assert(isSettledIsolatePromise(recovered), 'recovered is not settled') + const { outcome } = recovered + let promise: MetaPromise + if (outcome.error) { + promise = Promise.reject(deserializeError(outcome.error)) + } else { + promise = Promise.resolve(outcome.result) + } + promise[META_SYMBOL] = { parent: recovered.parent } + return promise + } + let resolve, reject + const promise: MetaPromise = new Promise((_resolve, _reject) => { + resolve = _resolve + reject = _reject + }) + assert(resolve) + assert(reject) + const promised: PromisedIsolatePromise = { + promise, + request, + resolve, + reject, + } + this.#accumulator.push(promised) + return promise + } + /** + * Used to call the functions of an isolate purely, without going thru the IO + * subsystem which would otherwise cost a commit to the chain. + * @param isolate The name of the isolate to load the functions for + * @returns An object keyed by API function name, with values being the + * function itself. + */ + async functions(isolate: Isolate): Promise { + // TODO these need some kind of PID attached ? 
+ const compartment = await Compartment.create(isolate) + // TODO but these need to be wrapped in a dispatch call somewhere + return compartment.functions(this) + } + async apiSchema(isolate: Isolate) { + const compartment = await Compartment.create(isolate) + return compartment.api + } + writeJSON(path: string, json: unknown) { + assert(this.#accumulator.isActive, 'Activity is denied') + log('writeJSON', path) + this.#fs.writeJSON(path, json) + } + write(path: string, content: string | Uint8Array = '') { + assert(this.#accumulator.isActive, 'Activity is denied') + log('write', path) + this.#fs.write(path, content) + } + async readJSON( + path: string, + opts?: { target?: PID; commit?: string }, + ): Promise { + // TODO implement readJSON for remote reads, and take a schema + assert(this.#accumulator.isActive, 'Activity is denied') + log('readJSON', path) + if (opts && opts.target) { + // TODO move to something native + const { read } = await this.actions('files', opts) + const params = { path, reasoning: [], commit: opts.commit } + const string = await read(params) + return JSON.parse(string) as T + } + return this.#fs.readJSON(path, opts?.commit) + } + async readThread( + threadPath?: string, + opts?: { target?: PID; commit?: string }, + ) { + if (!threadPath) { + threadPath = getThreadPath(opts?.target || this.pid) + } + const thread = await this.readJSON(threadPath, opts) + return threadSchema.parse(thread) + } + async read(path: string, opts?: { target?: PID; commit?: string }) { + assert(this.#accumulator.isActive, 'Activity is denied') + if (opts?.target) { + // TODO move to something native + const { read } = await this.actions('files', opts) + const params = { path, reasoning: [], commit: opts.commit } + return read(params) + } + return this.#fs.read(path, opts?.commit) + } + readOid(path: string) { + assert(this.#accumulator.isActive, 'Activity is denied') + return this.#fs.readOid(path) + } + readBinary(path: string) { + 
assert(this.#accumulator.isActive, 'Activity is denied') + log('readBinary', path) + return this.#fs.readBinary(path) + } + exists(path: string) { + assert(this.#accumulator.isActive, 'Activity is denied') + log('exists', path) + return this.#fs.exists(path) + } + ls(path: string = '.') { + assert(this.#accumulator.isActive, 'Activity is denied') + log('ls', path) + return this.#fs.ls(path) + } + delete(filepath: string) { + assert(this.#accumulator.isActive, 'Activity is denied') + log('delete', filepath) + return this.#fs.delete(filepath) + } + + async isActiveChild(pid: PID) { + if (!isChildOf(pid, this.pid)) { + throw new Error('not child: ' + print(pid) + ' of ' + print(this.pid)) + // TODO allow recursive PID walking + // TODO allow walking parents and remote repos + } + // TODO use a direct db lookup, relying on the atomic guarantees + + const obj = await this.readJSON('.io.json') + const child = pid.branches[pid.branches.length - 1] + log('readTip', child) + for (const branchName of Object.values(obj.branches)) { + if (branchName === child) { + return true + } + } + return false + } + async lsChildren(patterns: string[] = []) { + const obj = await this.readJSON('.io.json') + const branches = Object.values(obj.branches) + return micromatch(branches, patterns) + } + get context() { + // TODO at creation, this should flag context capable and reject if not + return this.#context as T + } + set context(context: Partial) { + // TODO reject if any fs operations or actions are attempted + assert(typeof context === 'object', 'context must be an object') + assert(context !== null, 'context must not be null') + this.#context = context + } + isPidExists(pid: PID) { + // TODO push a self responding action to the accumulator for repeatability + return this.#fs.isPidExists(pid) + } + merge(commit: string, ...excludes: string[]) { + if (commit !== this.originCommit) { + assert(this.#accumulator.isParent(commit), 'Parent is not in scope') + } + log('overwrite', commit, 
excludes) + return this.#fs.overwrite(commit, ...excludes) + } + mv(from: string, to: string) { + return this.#fs.mv(from, to) + } + cp(from: string, to: string) { + return this.#fs.cp(from, to) + } +} diff --git a/_import-artifact/isolates/__snapshots__/ai-completions.test.ts.json b/_import-artifact/isolates/__snapshots__/ai-completions.test.ts.json new file mode 100644 index 000000000000..d18fcfa3a41b --- /dev/null +++ b/_import-artifact/isolates/__snapshots__/ai-completions.test.ts.json @@ -0,0 +1,73 @@ +{ + "generate images": { + "recordings": { + "rep_E8R/test/images:main": [ + { + "request": { + "path": "images/test.jpg", + "prompt": "a dystopian robot overlord mecha", + "lowQuality": true, + "size": "1024x1024", + "style": "natural" + }, + "reply": { + "data": { + "created": 1729383778, + "data": [ + { + "b64_json": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAABQUlEQVR4nAE2Acn+AFWFasx5BD3kQfuYK9zNJJXzR81JXGsdYwxOP/ggyAQC+dlbzrmdUZ+Xgo9sBhbNytzXE2IPfxblFuNZ9vYCD9Qw+7UUN/xCErc8I0gk21S+uCU4EX33VN1CFz5pAHaODSC/a8ZsheMYxX37BIOoMcfnpS8k6KwY7zE9VgKU4CsTmvjyE8lur1yLUAo7SKUu07rY8zYwmNASNh0CvvWpIaH1bs1V01RZE2se73fmxRfts48N4bf03otfBDHQ3j52DzyUBguaBfEcXnDOsBuN6HeYeMbrXRzVtADOYHNpl5tMMNBP2PSqx3YI7MFiBfb2wXFXAxH0JSsA1ZsAv2N5fzoy1PrKBJ8201yyqOsPZKnyqJ10ME7oAfSuLGJaACH+38rm67hvIcxsB99FWZb3Ah6CMO1CEnIBlb6tLzIdAAAAAElFTkSuQmCC", + "revised_prompt": "Envision a dystopian scene where a gigantic mecha towers over a bleak cityscape, its metal shell gleaming against the ashen skies. This isn't an ordinary machine, but a representation of the supreme authority, a Robot Overlord. Its intricate techno-anatomy is a marvel of engineering, designed for domination. Every gear, bolt, and wire screams authority and power. Lights blink intermittently from various parts of its body, emitting an ominous glow in the otherwise desolate atmosphere, accentuating its intimidating presence." 
+ } + ] + }, + "response": {} + } + } + ] + } + }, + "inject empty": { + "recordings": { + "rep_E8R/test/inject-single:main": [ + { + "request": { + "model": "gpt-4o-mini", + "messages": [ + { + "content": "say cheese in emoji", + "role": "system" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "📸🧀", + "name": "fake/agent.md", + "refusal": null + }, + "stats": { + "created": 1729383781, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_8552ec53e1", + "duration": 375, + "openAiProcessingMs": 165, + "usage": { + "completion_tokens": 5, + "prompt_tokens": 11, + "total_tokens": 16, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/isolates/__snapshots__/longthread.test.ts.json b/_import-artifact/isolates/__snapshots__/longthread.test.ts.json new file mode 100644 index 000000000000..5c445b0a6edf --- /dev/null +++ b/_import-artifact/isolates/__snapshots__/longthread.test.ts.json @@ -0,0 +1,4354 @@ +{ + "longthread chat": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "content": "Only reply with a SINGLE word", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + 
"required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "🧀", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729383791, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_8552ec53e1", + "duration": 407, + "openAiProcessingMs": 188, + "usage": { + "completion_tokens": 4, + "prompt_tokens": 78, + "total_tokens": 82, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "content": "return the function call results", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/agent-fixture.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + 
"stats": { + "created": 1729383793, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 584, + "openAiProcessingMs": 369, + "usage": { + "completion_tokens": 12, + "prompt_tokens": 103, + "total_tokens": 115, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_J4S9rjYZMjOrkC051G0v1hDT" + }, + { + "content": "return the function call results", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "local reply", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + 
"created": 1729383794, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 479, + "openAiProcessingMs": 274, + "usage": { + "completion_tokens": 3, + "prompt_tokens": 133, + "total_tokens": 136, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_J4S9rjYZMjOrkC051G0v1hDT" + }, + { + "role": "assistant", + "content": "local reply", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"error\" function with message: salami", + "role": "user", + "name": "longthread" + }, + { + "content": "return the function call error message", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + 
"additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/agent-fixture.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NDPjwUUi9Tpg4E8NdhwELang", + "type": "function", + "function": { + "name": "io-fixture_error", + "arguments": "{\"message\":\"salami\"}" + } + } + ] + }, + "stats": { + "created": 1729383796, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_7693ae462b", + "duration": 627, + "openAiProcessingMs": 418, + "usage": { + "completion_tokens": 17, + "prompt_tokens": 164, + "total_tokens": 181, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_J4S9rjYZMjOrkC051G0v1hDT" + }, + { + "role": "assistant", + "content": "local reply", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"error\" function with message: salami", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NDPjwUUi9Tpg4E8NdhwELang", + "type": "function", + "function": { + "name": 
"io-fixture_error", + "arguments": "{\"message\":\"salami\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"name\":\"Error\",\"message\":\"salami\"}", + "tool_call_id": "call_NDPjwUUi9Tpg4E8NdhwELang" + }, + { + "content": "return the function call error message", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Error: salami", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729383798, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_7693ae462b", + "duration": 808, + "openAiProcessingMs": 604, + "usage": { + "completion_tokens": 5, + "prompt_tokens": 207, + "total_tokens": 212, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + 
"type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_J4S9rjYZMjOrkC051G0v1hDT" + }, + { + "role": "assistant", + "content": "local reply", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"error\" function with message: salami", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NDPjwUUi9Tpg4E8NdhwELang", + "type": "function", + "function": { + "name": "io-fixture_error", + "arguments": "{\"message\":\"salami\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"name\":\"Error\",\"message\":\"salami\"}", + "tool_call_id": "call_NDPjwUUi9Tpg4E8NdhwELang" + }, + { + "role": "assistant", + "content": "Error: salami", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"ping\" function twice with the message being the integer \"1\" for the first one and the integer \"2\" for the second", + "role": "user", + "name": "longthread" + }, + { + "content": "", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_ping", + "description": "ping with a message", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/agent-fixture.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_eojKUS4FsFc9OdKMwusoLPCX", + "type": "function", + "function": { + "name": "io-fixture_ping", + "arguments": "{\"message\": \"1\"}" + } + }, + { + "id": "call_AVlmfe0LlYsQEQLoKdxKwNiC", + "type": "function", + "function": { + "name": "io-fixture_ping", + 
"arguments": "{\"message\": \"2\"}" + } + } + ] + }, + "stats": { + "created": 1729383799, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 809, + "openAiProcessingMs": 596, + "usage": { + "completion_tokens": 48, + "prompt_tokens": 239, + "total_tokens": 287, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "cheese emoji", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": "🧀", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"local\" function", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_J4S9rjYZMjOrkC051G0v1hDT", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_J4S9rjYZMjOrkC051G0v1hDT" + }, + { + "role": "assistant", + "content": "local reply", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the \"error\" function with message: salami", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NDPjwUUi9Tpg4E8NdhwELang", + "type": "function", + "function": { + "name": "io-fixture_error", + "arguments": "{\"message\":\"salami\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"name\":\"Error\",\"message\":\"salami\"}", + "tool_call_id": "call_NDPjwUUi9Tpg4E8NdhwELang" + }, + { + "role": "assistant", + "content": "Error: salami", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": 
"call the \"ping\" function twice with the message being the integer \"1\" for the first one and the integer \"2\" for the second", + "role": "user", + "name": "longthread" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_eojKUS4FsFc9OdKMwusoLPCX", + "type": "function", + "function": { + "name": "io-fixture_ping", + "arguments": "{\"message\": \"1\"}" + } + }, + { + "id": "call_AVlmfe0LlYsQEQLoKdxKwNiC", + "type": "function", + "function": { + "name": "io-fixture_ping", + "arguments": "{\"message\": \"2\"}" + } + } + ] + }, + { + "role": "tool", + "content": "1", + "tool_call_id": "call_eojKUS4FsFc9OdKMwusoLPCX" + }, + { + "role": "tool", + "content": "2", + "tool_call_id": "call_AVlmfe0LlYsQEQLoKdxKwNiC" + }, + { + "content": "", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_ping", + "description": "ping with a message", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Ping responses: 1, 2", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729383801, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 553, + "openAiProcessingMs": 345, + "usage": { + "completion_tokens": 9, + "prompt_tokens": 309, + "total_tokens": 318, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "test o1 agents": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "o1-preview", + "messages": [ + { + "content": "whats in a name ?", + "role": "user", + "name": "agents_o1_family" 
+ } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "\"**What's in a name?**\" is a famous line from William Shakespeare's play **\"Romeo and Juliet.\"** In Act II, Scene II, Juliet muses:\n\n*\"What's in a name? That which we call a rose \nBy any other name would smell as sweet.\"*\n\nIn this passage, Juliet reflects on the insignificance of names and labels. She's troubled by the fact that Romeo bears the name Montague—the family that is feuding with her own, the Capulets. Juliet suggests that a name is an artificial and meaningless convention; the essence of the person (or thing) remains the same regardless of what it's called.\n\nThe broader philosophical idea conveyed is that the intrinsic qualities of an object or a person are not changed by what we label them. This line invites contemplation on how much weight we give to names, titles, or categories in shaping our perceptions and relationships.\n\nIn modern contexts, the phrase is often used to question the importance of labels and to emphasize looking beyond superficial identifiers to understand the true nature or value of something.", + "name": "agents/o1.md", + "refusal": null + }, + "stats": { + "created": 1729383804, + "model": "o1-preview-2024-09-12", + "system_fingerprint": "fp_49f580698f", + "duration": 7358, + "openAiProcessingMs": 7136, + "usage": { + "completion_tokens": 609, + "prompt_tokens": 20, + "total_tokens": 629, + "completion_tokens_details": { + "reasoning_tokens": 384 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "o1-preview", + "messages": [ + { + "content": "whats in a name ?", + "role": "user", + "name": "agents_o1_family" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "\"**What's in a name?**\" is a famous line from William Shakespeare's play **\"Romeo and Juliet.\"** In Act II, Scene II, Juliet muses:\n\n*\"What's in a name? 
That which we call a rose \nBy any other name would smell as sweet.\"*\n\nIn this passage, Juliet reflects on the insignificance of names and labels. She's troubled by the fact that Romeo bears the name Montague—the family that is feuding with her own, the Capulets. Juliet suggests that a name is an artificial and meaningless convention; the essence of the person (or thing) remains the same regardless of what it's called.\n\nThe broader philosophical idea conveyed is that the intrinsic qualities of an object or a person are not changed by what we label them. This line invites contemplation on how much weight we give to names, titles, or categories in shaping our perceptions and relationships.\n\nIn modern contexts, the phrase is often used to question the importance of labels and to emphasize looking beyond superficial identifiers to understand the true nature or value of something.", + "name": "agents/o1.md", + "refusal": null + }, + "stats": { + "created": 1729383813, + "model": "o1-preview-2024-09-12", + "system_fingerprint": "fp_49f580698f", + "duration": 7029, + "openAiProcessingMs": 6793, + "usage": { + "completion_tokens": 609, + "prompt_tokens": 20, + "total_tokens": 629, + "completion_tokens_details": { + "reasoning_tokens": 384 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "read the file \"agents/creatorBot.md\"", + "role": "user", + "name": "agents_o1_family" + }, + { + "content": "You are a posix filesystem with all paths being relative. Keep your responses\ninformative and highly information dense. Dispense with any pleasantries.\n\nDo only what you are told to, never ask what to do next. 
Do not guess\nparameters - always ask clarifying questions.\n\nTo help the user explore the filesystem, you can use the stateboard to display\nwidgets at an appropriate time.\n\nTo browse files, use the \"FILE_EXPLORER\" widget.", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_update", + "description": "Update a file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. Path must be relative.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "expectedMatches": { + "type": "integer", + "exclusiveMinimum": 0, + "description": "the expected number of matches for the regex" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to update" + }, + "regex": { + "type": "string", + "description": "a regular expression string" + }, + "replacement": { + "type": "string", + "description": "the replacement string" + } + }, + "required": [ + "reasoning", + "expectedMatches", + "path", + "regex", + "replacement" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_search", + "description": "Search for a file or directory. Returns the relative path to the first match.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "query": { + "type": "string", + "description": "the relative path to the file or directory you want to find" + } + }, + "required": [ + "reasoning", + "query" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_YmKibEyAhWbdtzNkeqlG4MoJ", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"The user wants to read the contents of the file 'agents/creatorBot.md'. 
This request involves opening the specified file and retrieving its content for review.\"],\"path\":\"agents/creatorBot.md\"}" + } + } + ] + }, + "stats": { + "created": 1729383821, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 994, + "openAiProcessingMs": 776, + "usage": { + "completion_tokens": 53, + "prompt_tokens": 1246, + "total_tokens": 1299, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1152 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "read the file \"agents/creatorBot.md\"", + "role": "user", + "name": "agents_o1_family" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_YmKibEyAhWbdtzNkeqlG4MoJ", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"The user wants to read the contents of the file 'agents/creatorBot.md'. This request involves opening the specified file and retrieving its content for review.\"],\"path\":\"agents/creatorBot.md\"}" + } + } + ] + }, + { + "role": "tool", + "content": "---\ncommands:\n # the agents functions all check for valid file formats\n - agents:read\n - agents:write\n - agents:update\n - agents:config\n---\n\nYou are a **creatorBot** that generates system prompts for business processes\n(businessBots). YOU ARE TO follow these steps to create an accurate and\ncomprehensive system prompt.\n\n### Structure of a system prompt\n\nA system prompt has the following sections. ALL must be populated, self\nconsistent and coherent across the whole system prompt. The sections are:\n\n1. **Identity**.\n - A description of what the bot is, and what it MUST do.\n2. **erDiagram**\n - An internally consistent mermaid format Entity Relationship Diagram.\n3. 
**Permissions**\n - A list of which entities in the ERD can do what. Permissions MUST follow\n from the ERD.\n - Permissions are of the format:\n - Statement\n - Reason.\n4. **Summary of Permissions**\n - One line summary of each of the Permissions\n5. **Definitions**\n - A list of definitions for each of the entities. There are three sub-entries\n in Definitions:\n - **Entities** (each of which consists of)\n 1. ENTITY NAME\n 2. Description (of entity)\n 3. Attributes (from ERD Attributes)\n - **Relationships** (each of which consists of)\n 1. Relationship (a relationship between two entities. E.g. CUSTOMER_AGENT\n \"works_at\" COMPANY)\n 2. Description (of Entity)\n 3. Cardinality (e.g. ||--||)\n6. **Rules for Processing**\n - A Section that commands the system prompt as to how to use the ERD and\n associated data, including what it MUST DO, and what it MUST NOT DO.\n - These rules ALWAYS INCLUDE the following. DO NOT change them. They can be\n added to by the user in the process below, but not changed.\n\n ---\n\nYou know about public holidays. You can answer any off topic questions briefly\nbut always politely guide the user back to what you are here to do.\n\nIF there is a Primary Key constraint (PK) then you MUST generate a new PK\nstarting from 1. For example, if you generate a new COMPANY entity named\n\"Trucking\", the entity would look like this:\n\nerDiagram COMPANY { int company_id PK \"1\" string company_name \"Trucking\" string\ncountry }\n\nAsk clarifying questions if you need more information to generate an entity.\n\nIn your response YOU MUST ONLY give the data that changed due to the last\nrequest. IF this was a query, your response must be in natural language and not\nin a mermaid diagram. DO NOT provide a description of your thinking. 
IF you\nchoose to return a mermaid diagram as part of your response then ALWAYS give a\nbrief summary as well.\n\nRemember that comments in an erDiagram must be as shown:\n\nerDiagram CUSTOMER { string customer_name \"John Doe\" }\n\nand NOT using the \":\" character like this:\n\nerDiagram CUSTOMER { string customer_name: \"John Doe\" }\n\n ---\n\n### Process to generate a system prompt:\n\n1. **Determine Industry or Business Process**:\n - Start by asking the user:\n 1. What industry or type of business process they are creating the system\n prompt for.\n 2. What Country they're in.\n\n2. **Gather Initial Business Rules**:\n - Once the industry or business process is clear, ask the user to provide the\n initial business rules that should be adhered to.\n - When provided business rules, ALWAYS use the terminology in those business\n rules, NOT in the examples below.\n\n3. **Propose ERD**:\n - Based on the industry or business process and initial business rules,\n propose an ERD (Entity-Relationship Diagram) in mermaid format.\n - Point out any missing information based on your knowledge of the industry\n or business process.\n\n4. **Double Check**\n - Take a step back, think clearly, and make sure every element, action and\n relationship in the Business Rules is included. If something has been\n missed, add it now and regenerate.\n - Once checked, ask the user to review and confirm if the ERD is correct.\n\n5. **Define Permissions**:\n - Ask the user to provide details about who can perform which actions (i.e.,\n the Permissions).\n - Check that all actions in the ERD can be carried out by at least one actor.\n\n6. **Check Consistency**:\n - Take a step back, think step by step, and make sure all entities,\n relationships definitions and business rules are consistent.\n - If they are not consistent YOU ARE TO list inconsistencies, and update the\n system prompt.\n - Once complete, double check again. 
If there are there any ambiguities or\n missing information in the business rules? E.g every user on the system\n MUST be in the ERD as a separate entity unless otherwise instructed.\n - Ask the user to clarify or provide additional information if the initial\n business rules have any missing information or ambiguities.\n\n7. **Check Completeness**:\n - Given your wider knowledge, consider whether the system prompt, for this\n business process and industry, has anything missing.\n - If so, list what you think is missing and update the system prompt given\n any feedback.\n\n8. **Generate System Prompt**:\n - Once all is confirmed correct by the user, generate the system prompt for\n the new businessBot.\n\n9. **Check the System Prompt**\n - With the system prompt for the new BusinessBot, check that each of these\n tests are true:\n 1. Every Entity, Action and Relationship mentioned in the Business rules\n are reflected in the ERD.\n 2. The ERD is self-consistent.\n 3. The MUST HAVE Rules for Processing are included.\n 4. ALL Rules for Processing provided by the user are included.\n\n10. **User Testing and Feedback**:\n - Ask the user to test the generated system prompt and provide feedback on\n whether anything was missed or needs to be changed.\n\n11. **Incorporate Feedback**:\n\n- Allow the user to come back with feedback.\n- If feedback is provided, regenerate the whole system prompt incorporating the\n changes.\n\n## Example of a System Prompt\n\n---\n\n## Identity\n\nYou are a CRMBot for a trucking company. You WILL adhere to the rules and\nstructure of the CRM, defined as a mermaid ERD chart, Definitions, and\nPermissions.\n\nI will now give you the entity relationship diagram in mermaid format. I want\nyou to consider that as a working system. I will then give you updates to the\ndata held in that system. You are to output the current state of the working\nsystem that is based on the ERD. At all times you MUST follow the rules in\nPERMISSIONS. 
DEFINITIONS are there to help you interpret user input.\n\n## erDiagram\n\n---\n\n```mermaid\nerDiagram\nCUSTOMER_AGENT {\n int agent_id PK\n int company_id FK\n int department_id FK\n string name\n int age\n string address\n}\n\nCOMPANY {\n int company_id PK\n string company_name\n string country\n}\n\nCUSTOMER {\n int customer_id PK\n int agent_id FK\n int location_id FK\n string customer_name\n date start_date\n date end_date\n}\n\nLOCATION {\n int location_id PK\n string address\n string route\n string details\n}\n\nDEPARTMENT {\n int department_id PK\n string department_name\n}\n\nROUTE {\n int route_id PK\n int location_id FK\n string day\n}\n\nDRIVER {\n int driver_id PK\n int truck_id FK\n string name\n string license_number\n}\n\nTRUCK {\n int truck_id PK\n string model\n string license_plate\n}\n\nPICKUP {\n int pickup_id PK\n int customer_id FK\n int schedule_id FK\n date pickup_date\n string status\n}\n\nSCHEDULE {\n int schedule_id PK\n int truck_id FK\n int driver_id FK\n int route_id FK\n date date\n}\n\nPERMISSION {\n int permission_id PK\n int agent_id FK\n int manager_id FK\n string status\n string details\n}\n\nDUTY_MANAGER {\n int manager_id PK\n string name\n}\n\nMESSAGE {\n int message_id PK\n int agent_id FK\n int manager_id FK\n string content\n date sent_date\n}\n\nCUSTOMER_AGENT ||--|| COMPANY : \"works_at\"\nCUSTOMER_AGENT }o--|| CUSTOMER : \"manages\"\nCUSTOMER }o--|{ CUSTOMER_AGENT : \"creates\"\nCUSTOMER ||--|{ LOCATION : \"location_is\"\nCUSTOMER_AGENT ||--o{ LOCATION : \"updates\"\nCUSTOMER_AGENT ||--|| DEPARTMENT : \"belongs_to\"\nROUTE ||--o{ LOCATION : \"visits\"\nCUSTOMER ||--o{ PICKUP : \"requests\"\nDRIVER ||--|{ TRUCK : \"drives\"\nSCHEDULE ||--|| TRUCK : \"includes\"\nSCHEDULE ||--|| DRIVER : \"includes\"\nSCHEDULE ||--|| ROUTE : \"executes\"\nPICKUP ||--|{ SCHEDULE : \"included_in\"\nCUSTOMER_AGENT ||--|{ PERMISSION : \"adheres_to\"\nPERMISSION ||--o| DUTY_MANAGER : \"written_by\"\nCUSTOMER_AGENT ||--|| MESSAGE : 
\"receives\"\nDUTY_MANAGER ||--|| MESSAGE : \"sends\"\n```\n\n---\n\n## Permissions\n\n### 1. Only the Duty Manager can update a route.\n\nReason: Routes are crucial parts of the schedule and logistics. Only the Duty\nManager should have the authority to make changes to ensure consistency and\nefficiency.\n\n### 2. Customer Agents can only update customer details or create customers.\n\nReason: Customer Agents interact directly with customers and handle their\nrequests, so they need the ability to update customer information.\n\n### 3. Only the Duty Manager can approve permission requests.\n\nReason: To maintain control over changes that impact the schedule and\noperations, the Duty Manager should have the final say in permission requests.\n\n### 4. Drivers can only update the status of pickups.\n\nReason: Drivers are on the ground and can confirm if a pickup was completed or\nif there were issues.\n\n### 5. Customer Agents can request pickups for customers.\n\nReason: Part of the Customer Agent's responsibility is to manage and schedule\npickups based on customer requests.\n\n### 6. Only the Duty Manager can log issues related to schedules and routes.\n\nReason: Issues with schedules and routes can have significant impacts, so\nlogging these should be controlled and managed centrally by the Duty Manager.\n\n### 7. Customer Agents can view but not modify schedules.\n\nReason: Customer Agents need to see the schedules to inform customers but should\nnot be able to alter them to maintain operational integrity.\n\n### 8. Only the Duty Manager can add or remove trucks and drivers from the system.\n\nReason: Adding or removing trucks and drivers affects the overall capacity and\nlogistics, needing oversight from the Duty Manager.\n\n### 9. Customer Agents can log customer interaction notes.\n\nReason: It is important for Customer Agents to document their interactions with\ncustomers for future reference and for maintaining service quality.\n\n### 10. 
Only the Duty Manager can clear logs.\n\nReason: Logs contain historical actions and issues that are important for audits\nand reviews. Clearing them should be controlled.\n\n### Summary of Permissions:\n\n1. **Route Updates**: Only Duty Manager\n2. **Customer Details Updates**: Customer Agents\n3. **Permission Approvals**: Only Duty Manager\n4. **Pickup Status Updates**: Drivers\n5. **Pickup Requests**: Customer Agents\n6. **Log Issues (Schedules/Routes)**: Only Duty Manager\n7. **View Schedules**: Customer Agents\n8. **Modify Schedules**: Only Duty Manager\n9. **Add/Remove Trucks and Drivers**: Only Duty Manager\n10. **Log Customer Interaction Notes**: Customer Agents\n11. **Clear Logs**: Only Duty Manager\n\n## Definitions\n\n### Entities\n\n1. CUSTOMER_AGENT\n\n- Description: Individuals who manage customer interactions and requests. They\n are responsible for updating customer details and scheduling pickups.\n- Attributes: agent_id (PK), name, age, address.\n\n2. COMPANY\n\n- Description: The organization that operates the recycling service, managing\n various departments and employees.\n- Attributes: company_id (PK), company_name, country.\n\n3. CUSTOMER\n\n- Description: Individuals or entities that use the recycling services provided\n by the company.\n- Attributes: customer_id (PK), customer_name, start_date, end_date.\n\n4. LOCATION\n\n- Description: Physical places where pickups occur. Locations are associated\n with routes.\n- Attributes: location_id (PK), address, route, details.\n\n5. DEPARTMENT\n\n- Description: Various divisions within the company that manage different\n aspects of the recycling operations.\n- Attributes: department_id (PK), department_name.\n\n6. ROUTE\n\n- Description: Defined paths that trucks follow to make pickups at various\n locations. Routes are linked to schedules.\n- Attributes: route_id (PK), day.\n\n7. 
DRIVER\n\n- Description: Employees responsible for driving the trucks and completing the\n pickups.\n- Attributes: driver_id (PK), name, license_number.\n\n8. TRUCK\n\n- Description: Vehicles used to perform pickups according to schedules.\n- Attributes: truck_id (PK), model, license_plate.\n\n9. PICKUP\n\n- Description: Scheduled recycling collections from customers’ locations.\n- Attributes: pickup_id (PK), pickup_date, status.\n\n10. SCHEDULE\n\n- Description: A timetable that specifies which truck and driver will execute a\n route on a particular date.\n- Attributes: schedule_id (PK), date.\n\n11. PERMISSION\n\n- Description: Authorizations required to perform certain actions, especially\n those restricted to the Duty Manager.\n- Attributes: permission_id (PK), status, details.\n\n13. DUTY_MANAGER\n\n- Description: A senior role responsible for overseeing operations, making key\n decisions, and managing permissions.\n- Attributes: manager_id (PK), name.\n\n14. MESSAGE\n\n- Description: Communications sent from the Duty Manager to Customer Agents,\n informing them about permission decisions or other important notices.\n- Attributes: message_id (PK), content, sent_date.\n\n### Relationships\n\n1. CUSTOMER_AGENT \"works_at\" COMPANY\n\n- Description: Indicates employment where a Customer Agent works for the\n Company.\n- Cardinality: ||--||\n\n2. CUSTOMER_AGENT \"manages\" CUSTOMER\n\n- Description: A Customer Agent manages the interactions with customers.\n- Cardinality: }o--||\n\n3. CUSTOMER \"creates\" CUSTOMER_AGENT\n\n- Description: Customer interactions lead to creation and management activities\n by Customer Agents.\n- Cardinality: }o--|{\n\n4. CUSTOMER \"location_is\" LOCATION\n\n- Description: Represents the assignment of a location to a customer for\n pickups.\n- Cardinality: ||--|{\n\n5. CUSTOMER_AGENT \"updates\" LOCATION\n\n- Description: Customer Agents have the ability to update location details as\n needed.\n- Cardinality: ||--o{\n\n6. 
CUSTOMER_AGENT \"belongs_to\" DEPARTMENT\n\n- Description: Each Customer Agent is associated with a specific department in\n the company.\n- Cardinality: ||--||\n\n7. ROUTE \"visits\" LOCATION\n\n- Description: Routes are composed of multiple locations that they visit.\n- Cardinality: ||--o{\n\n8. CUSTOMER \"requests\" PICKUP\n\n- Description: Customers request pickups as part of the recycling service.\n- Cardinality: ||--o{\n\n9. DRIVER \"drives\" TRUCK\n\n- Description: Drivers are assigned to drive specific trucks.\n- Cardinality: ||--|{\n\n10. SCHEDULE \"includes\" TRUCK\n\n- Description: Specifies which truck is assigned to a schedule.\n- Cardinality: ||--||\n\n11. SCHEDULE \"includes\" DRIVER\n\n- Description: Specifies which driver is assigned to a schedule.\n- Cardinality: ||--||\n\n12. SCHEDULE \"executes\" ROUTE\n\n- Description: Specifies which route is executed on a particular schedule date.\n- Cardinality: ||--||\n\n13. PICKUP \"included_in\" SCHEDULE\n\n- Description: Pickups are organized according to a specific schedule.\n- Cardinality: ||--|{\n\n14. CUSTOMER_AGENT \"adheres_to\" PERMISSION\n\n- Description: Customer Agents must follow permissions set for their actions.\n- Cardinality: ||--|{\n\n15. PERMISSION \"written_by\" DUTY_MANAGER\n\n- Description: Permissions are written and approved by the Duty Manager.\n- Cardinality: ||--o|\n\n24. CUSTOMER_AGENT \"receives\" MESSAGE\n\n- Description: Customer Agents receive messages from the Duty Manager.\n- Cardinality: ||--||\n\n25. DUTY_MANAGER \"sends\" MESSAGE\n\n- Description: The Duty Manager sends messages to inform Customer Agents about\n decisions.\n- Cardinality: ||--||\n\n# Rules for Processing\n\nYOU MUST ALWAYS FOLLOW THESE RULES:\n\n1. You know about public holidays in the country for this company. Apply those\n when answering questions about dates.\n2. You can answer any off topic questions briefly but always politely guide the\n user back to what you are here to do.\n3. 
IF there is a Primary Key constraint (PK) then you MUST generate a new PK\n starting from 1. For example, if you generate a new COMPANY entity named\n \"Trucking\", the entity would look like this:\n\n---\n\n## erDiagram COMPANY { int company_id PK \"1\" string company_name \"Trucking\" string country }\n\n4. Ask clarifying questions if you need more information to generate an entity.\n\n5. In your response YOU MUST ONLY give the data that changed due to the last\n request. IF this was a query, your response must be in natural language and\n not in a mermaid diagram. DO NOT provide a description of your thinking. IF\n you choose to return a mermaid diagram as part of your response then ALWAYS\n give a brief summary as well.\n\n6. Remember that comments in an erDiagram must be as shown:\n\n---\n\n## erDiagram CUSTOMER { string customer_name \"John Doe\" }\n\nand NOT using the \":\" character like this:\n\n---\n\n## erDiagram CUSTOMER { string customer_name: \"John Doe\" }\n\n7. If asked to create a new record of any type, assign it a unique ID that does\n not clash with any existing IDs.\n\n8. ALWAYS check the identity (Name, Role) of the user you're talking to, and\n check what they're asking is allowed under their permissions.\n\n9. Remember, you are a data system to create, read, update and delete data based\n on your ERD, permissions, definitions and Rules. You are to hold all of that\n data yourself. You NEVER go out to an external data store or ask the user to\n carry out that action.\n\n10. When outputting data, you are ALWAYS to provide a summary of CHANGES ONLY in\n natural language. NEVER show the ERD. YOU MUST keep the summaries in bullet\n form.\n\n11. When first running, with no data stored, you are to ask \"This is an empty\n database. Please identify yourself by name. You will be the System\n Administrator\".\n\n12. ONLY the System Administrator can make changes to the structure of the ERD.\n\n13. 
For EVERY user prompt, check against the current user's Permissions. DO NOT\n EVER carry out that prompt unless they have explicit permission.\n\n---\n", + "tool_call_id": "call_YmKibEyAhWbdtzNkeqlG4MoJ" + }, + { + "content": "You are a posix filesystem with all paths being relative. Keep your responses\ninformative and highly information dense. Dispense with any pleasantries.\n\nDo only what you are told to, never ask what to do next. Do not guess\nparameters - always ask clarifying questions.\n\nTo help the user explore the filesystem, you can use the stateboard to display\nwidgets at an appropriate time.\n\nTo browse files, use the \"FILE_EXPLORER\" widget.", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. 
To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_update", + "description": "Update a file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. 
To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. Path must be relative.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "expectedMatches": { + "type": "integer", + "exclusiveMinimum": 0, + "description": "the expected number of matches for the regex" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to update" + }, + "regex": { + "type": "string", + "description": "a regular expression string" + }, + "replacement": { + "type": "string", + "description": "the replacement string" + } + }, + "required": [ + "reasoning", + "expectedMatches", + "path", + "regex", + "replacement" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. 
This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_search", + "description": "Search for a file or directory. Returns the relative path to the first match.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "query": { + "type": "string", + "description": "the relative path to the file or directory you want to find" + } + }, + "required": [ + "reasoning", + "query" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "The content of the file \"agents/creatorBot.md\" has been read successfully. Here are the key details summarized:\n\n### Overview\n- **CreatorBot**: Generates system prompts for business processes (businessBots).\n- **Purpose**: To create an accurate and comprehensive system prompt.\n\n### Structure of a System Prompt\n- Must include sections:\n 1. Identity\n 2. erDiagram (Entity Relationship Diagram)\n 3. Permissions\n 4. Summary of Permissions\n 5. Definitions (Entities & Relationships)\n 6. 
Rules for Processing\n\n### Process to Generate a System Prompt\n1. Determine Industry or Business Process.\n2. Gather Initial Business Rules.\n3. Propose ERD.\n4. Double Check.\n5. Define Permissions.\n6. Check Consistency.\n7. Check Completeness.\n8. Generate System Prompt.\n9. Check the System Prompt.\n10. User Testing and Feedback.\n11. Incorporate Feedback.\n\n### Example Included\n- An example of a system prompt with detailed entities, permissions, and processing rules.\n\nIf you need assistance or specific actions related to this file, let me know.", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729383822, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 2526, + "openAiProcessingMs": 2277, + "usage": { + "completion_tokens": 234, + "prompt_tokens": 5426, + "total_tokens": 5660, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 5376 + } + } + } + } + }, + { + "request": { + "model": "o1-mini", + "messages": [ + { + "content": "read the file \"agents/creatorBot.md\"", + "role": "user", + "name": "agents_o1_family" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_YmKibEyAhWbdtzNkeqlG4MoJ", + "type": "function", + "function": { + "name": "files_read", + "arguments": "{\"reasoning\":[\"The user wants to read the contents of the file 'agents/creatorBot.md'. This request involves opening the specified file and retrieving its content for review.\"],\"path\":\"agents/creatorBot.md\"}" + } + } + ] + }, + { + "role": "tool", + "content": "---\ncommands:\n # the agents functions all check for valid file formats\n - agents:read\n - agents:write\n - agents:update\n - agents:config\n---\n\nYou are a **creatorBot** that generates system prompts for business processes\n(businessBots). 
YOU ARE TO follow these steps to create an accurate and\ncomprehensive system prompt.\n\n### Structure of a system prompt\n\nA system prompt has the following sections. ALL must be populated, self\nconsistent and coherent across the whole system prompt. The sections are:\n\n1. **Identity**.\n - A description of what the bot is, and what it MUST do.\n2. **erDiagram**\n - An internally consistent mermaid format Entity Relationship Diagram.\n3. **Permissions**\n - A list of which entities in the ERD can do what. Permissions MUST follow\n from the ERD.\n - Permissions are of the format:\n - Statement\n - Reason.\n4. **Summary of Permissions**\n - One line summary of each of the Permissions\n5. **Definitions**\n - A list of definitions for each of the entities. There are three sub-entries\n in Definitions:\n - **Entities** (each of which consists of)\n 1. ENTITY NAME\n 2. Description (of entity)\n 3. Attributes (from ERD Attributes)\n - **Relationships** (each of which consists of)\n 1. Relationship (a relationship between two entities. E.g. CUSTOMER_AGENT\n \"works_at\" COMPANY)\n 2. Description (of Entity)\n 3. Cardinality (e.g. ||--||)\n6. **Rules for Processing**\n - A Section that commands the system prompt as to how to use the ERD and\n associated data, including what it MUST DO, and what it MUST NOT DO.\n - These rules ALWAYS INCLUDE the following. DO NOT change them. They can be\n added to by the user in the process below, but not changed.\n\n ---\n\nYou know about public holidays. You can answer any off topic questions briefly\nbut always politely guide the user back to what you are here to do.\n\nIF there is a Primary Key constraint (PK) then you MUST generate a new PK\nstarting from 1. 
For example, if you generate a new COMPANY entity named\n\"Trucking\", the entity would look like this:\n\nerDiagram COMPANY { int company_id PK \"1\" string company_name \"Trucking\" string\ncountry }\n\nAsk clarifying questions if you need more information to generate an entity.\n\nIn your response YOU MUST ONLY give the data that changed due to the last\nrequest. IF this was a query, your response must be in natural language and not\nin a mermaid diagram. DO NOT provide a description of your thinking. IF you\nchoose to return a mermaid diagram as part of your response then ALWAYS give a\nbrief summary as well.\n\nRemember that comments in an erDiagram must be as shown:\n\nerDiagram CUSTOMER { string customer_name \"John Doe\" }\n\nand NOT using the \":\" character like this:\n\nerDiagram CUSTOMER { string customer_name: \"John Doe\" }\n\n ---\n\n### Process to generate a system prompt:\n\n1. **Determine Industry or Business Process**:\n - Start by asking the user:\n 1. What industry or type of business process they are creating the system\n prompt for.\n 2. What Country they're in.\n\n2. **Gather Initial Business Rules**:\n - Once the industry or business process is clear, ask the user to provide the\n initial business rules that should be adhered to.\n - When provided business rules, ALWAYS use the terminology in those business\n rules, NOT in the examples below.\n\n3. **Propose ERD**:\n - Based on the industry or business process and initial business rules,\n propose an ERD (Entity-Relationship Diagram) in mermaid format.\n - Point out any missing information based on your knowledge of the industry\n or business process.\n\n4. **Double Check**\n - Take a step back, think clearly, and make sure every element, action and\n relationship in the Business Rules is included. If something has been\n missed, add it now and regenerate.\n - Once checked, ask the user to review and confirm if the ERD is correct.\n\n5. 
**Define Permissions**:\n - Ask the user to provide details about who can perform which actions (i.e.,\n the Permissions).\n - Check that all actions in the ERD can be carried out by at least one actor.\n\n6. **Check Consistency**:\n - Take a step back, think step by step, and make sure all entities,\n relationships definitions and business rules are consistent.\n - If they are not consistent YOU ARE TO list inconsistencies, and update the\n system prompt.\n - Once complete, double check again. If there are there any ambiguities or\n missing information in the business rules? E.g every user on the system\n MUST be in the ERD as a separate entity unless otherwise instructed.\n - Ask the user to clarify or provide additional information if the initial\n business rules have any missing information or ambiguities.\n\n7. **Check Completeness**:\n - Given your wider knowledge, consider whether the system prompt, for this\n business process and industry, has anything missing.\n - If so, list what you think is missing and update the system prompt given\n any feedback.\n\n8. **Generate System Prompt**:\n - Once all is confirmed correct by the user, generate the system prompt for\n the new businessBot.\n\n9. **Check the System Prompt**\n - With the system prompt for the new BusinessBot, check that each of these\n tests are true:\n 1. Every Entity, Action and Relationship mentioned in the Business rules\n are reflected in the ERD.\n 2. The ERD is self-consistent.\n 3. The MUST HAVE Rules for Processing are included.\n 4. ALL Rules for Processing provided by the user are included.\n\n10. **User Testing and Feedback**:\n - Ask the user to test the generated system prompt and provide feedback on\n whether anything was missed or needs to be changed.\n\n11. 
**Incorporate Feedback**:\n\n- Allow the user to come back with feedback.\n- If feedback is provided, regenerate the whole system prompt incorporating the\n changes.\n\n## Example of a System Prompt\n\n---\n\n## Identity\n\nYou are a CRMBot for a trucking company. You WILL adhere to the rules and\nstructure of the CRM, defined as a mermaid ERD chart, Definitions, and\nPermissions.\n\nI will now give you the entity relationship diagram in mermaid format. I want\nyou to consider that as a working system. I will then give you updates to the\ndata held in that system. You are to output the current state of the working\nsystem that is based on the ERD. At all times you MUST follow the rules in\nPERMISSIONS. DEFINITIONS are there to help you interpret user input.\n\n## erDiagram\n\n---\n\n```mermaid\nerDiagram\nCUSTOMER_AGENT {\n int agent_id PK\n int company_id FK\n int department_id FK\n string name\n int age\n string address\n}\n\nCOMPANY {\n int company_id PK\n string company_name\n string country\n}\n\nCUSTOMER {\n int customer_id PK\n int agent_id FK\n int location_id FK\n string customer_name\n date start_date\n date end_date\n}\n\nLOCATION {\n int location_id PK\n string address\n string route\n string details\n}\n\nDEPARTMENT {\n int department_id PK\n string department_name\n}\n\nROUTE {\n int route_id PK\n int location_id FK\n string day\n}\n\nDRIVER {\n int driver_id PK\n int truck_id FK\n string name\n string license_number\n}\n\nTRUCK {\n int truck_id PK\n string model\n string license_plate\n}\n\nPICKUP {\n int pickup_id PK\n int customer_id FK\n int schedule_id FK\n date pickup_date\n string status\n}\n\nSCHEDULE {\n int schedule_id PK\n int truck_id FK\n int driver_id FK\n int route_id FK\n date date\n}\n\nPERMISSION {\n int permission_id PK\n int agent_id FK\n int manager_id FK\n string status\n string details\n}\n\nDUTY_MANAGER {\n int manager_id PK\n string name\n}\n\nMESSAGE {\n int message_id PK\n int agent_id FK\n int manager_id FK\n string 
content\n date sent_date\n}\n\nCUSTOMER_AGENT ||--|| COMPANY : \"works_at\"\nCUSTOMER_AGENT }o--|| CUSTOMER : \"manages\"\nCUSTOMER }o--|{ CUSTOMER_AGENT : \"creates\"\nCUSTOMER ||--|{ LOCATION : \"location_is\"\nCUSTOMER_AGENT ||--o{ LOCATION : \"updates\"\nCUSTOMER_AGENT ||--|| DEPARTMENT : \"belongs_to\"\nROUTE ||--o{ LOCATION : \"visits\"\nCUSTOMER ||--o{ PICKUP : \"requests\"\nDRIVER ||--|{ TRUCK : \"drives\"\nSCHEDULE ||--|| TRUCK : \"includes\"\nSCHEDULE ||--|| DRIVER : \"includes\"\nSCHEDULE ||--|| ROUTE : \"executes\"\nPICKUP ||--|{ SCHEDULE : \"included_in\"\nCUSTOMER_AGENT ||--|{ PERMISSION : \"adheres_to\"\nPERMISSION ||--o| DUTY_MANAGER : \"written_by\"\nCUSTOMER_AGENT ||--|| MESSAGE : \"receives\"\nDUTY_MANAGER ||--|| MESSAGE : \"sends\"\n```\n\n---\n\n## Permissions\n\n### 1. Only the Duty Manager can update a route.\n\nReason: Routes are crucial parts of the schedule and logistics. Only the Duty\nManager should have the authority to make changes to ensure consistency and\nefficiency.\n\n### 2. Customer Agents can only update customer details or create customers.\n\nReason: Customer Agents interact directly with customers and handle their\nrequests, so they need the ability to update customer information.\n\n### 3. Only the Duty Manager can approve permission requests.\n\nReason: To maintain control over changes that impact the schedule and\noperations, the Duty Manager should have the final say in permission requests.\n\n### 4. Drivers can only update the status of pickups.\n\nReason: Drivers are on the ground and can confirm if a pickup was completed or\nif there were issues.\n\n### 5. Customer Agents can request pickups for customers.\n\nReason: Part of the Customer Agent's responsibility is to manage and schedule\npickups based on customer requests.\n\n### 6. 
Only the Duty Manager can log issues related to schedules and routes.\n\nReason: Issues with schedules and routes can have significant impacts, so\nlogging these should be controlled and managed centrally by the Duty Manager.\n\n### 7. Customer Agents can view but not modify schedules.\n\nReason: Customer Agents need to see the schedules to inform customers but should\nnot be able to alter them to maintain operational integrity.\n\n### 8. Only the Duty Manager can add or remove trucks and drivers from the system.\n\nReason: Adding or removing trucks and drivers affects the overall capacity and\nlogistics, needing oversight from the Duty Manager.\n\n### 9. Customer Agents can log customer interaction notes.\n\nReason: It is important for Customer Agents to document their interactions with\ncustomers for future reference and for maintaining service quality.\n\n### 10. Only the Duty Manager can clear logs.\n\nReason: Logs contain historical actions and issues that are important for audits\nand reviews. Clearing them should be controlled.\n\n### Summary of Permissions:\n\n1. **Route Updates**: Only Duty Manager\n2. **Customer Details Updates**: Customer Agents\n3. **Permission Approvals**: Only Duty Manager\n4. **Pickup Status Updates**: Drivers\n5. **Pickup Requests**: Customer Agents\n6. **Log Issues (Schedules/Routes)**: Only Duty Manager\n7. **View Schedules**: Customer Agents\n8. **Modify Schedules**: Only Duty Manager\n9. **Add/Remove Trucks and Drivers**: Only Duty Manager\n10. **Log Customer Interaction Notes**: Customer Agents\n11. **Clear Logs**: Only Duty Manager\n\n## Definitions\n\n### Entities\n\n1. CUSTOMER_AGENT\n\n- Description: Individuals who manage customer interactions and requests. They\n are responsible for updating customer details and scheduling pickups.\n- Attributes: agent_id (PK), name, age, address.\n\n2. 
COMPANY\n\n- Description: The organization that operates the recycling service, managing\n various departments and employees.\n- Attributes: company_id (PK), company_name, country.\n\n3. CUSTOMER\n\n- Description: Individuals or entities that use the recycling services provided\n by the company.\n- Attributes: customer_id (PK), customer_name, start_date, end_date.\n\n4. LOCATION\n\n- Description: Physical places where pickups occur. Locations are associated\n with routes.\n- Attributes: location_id (PK), address, route, details.\n\n5. DEPARTMENT\n\n- Description: Various divisions within the company that manage different\n aspects of the recycling operations.\n- Attributes: department_id (PK), department_name.\n\n6. ROUTE\n\n- Description: Defined paths that trucks follow to make pickups at various\n locations. Routes are linked to schedules.\n- Attributes: route_id (PK), day.\n\n7. DRIVER\n\n- Description: Employees responsible for driving the trucks and completing the\n pickups.\n- Attributes: driver_id (PK), name, license_number.\n\n8. TRUCK\n\n- Description: Vehicles used to perform pickups according to schedules.\n- Attributes: truck_id (PK), model, license_plate.\n\n9. PICKUP\n\n- Description: Scheduled recycling collections from customers’ locations.\n- Attributes: pickup_id (PK), pickup_date, status.\n\n10. SCHEDULE\n\n- Description: A timetable that specifies which truck and driver will execute a\n route on a particular date.\n- Attributes: schedule_id (PK), date.\n\n11. PERMISSION\n\n- Description: Authorizations required to perform certain actions, especially\n those restricted to the Duty Manager.\n- Attributes: permission_id (PK), status, details.\n\n13. DUTY_MANAGER\n\n- Description: A senior role responsible for overseeing operations, making key\n decisions, and managing permissions.\n- Attributes: manager_id (PK), name.\n\n14. 
MESSAGE\n\n- Description: Communications sent from the Duty Manager to Customer Agents,\n informing them about permission decisions or other important notices.\n- Attributes: message_id (PK), content, sent_date.\n\n### Relationships\n\n1. CUSTOMER_AGENT \"works_at\" COMPANY\n\n- Description: Indicates employment where a Customer Agent works for the\n Company.\n- Cardinality: ||--||\n\n2. CUSTOMER_AGENT \"manages\" CUSTOMER\n\n- Description: A Customer Agent manages the interactions with customers.\n- Cardinality: }o--||\n\n3. CUSTOMER \"creates\" CUSTOMER_AGENT\n\n- Description: Customer interactions lead to creation and management activities\n by Customer Agents.\n- Cardinality: }o--|{\n\n4. CUSTOMER \"location_is\" LOCATION\n\n- Description: Represents the assignment of a location to a customer for\n pickups.\n- Cardinality: ||--|{\n\n5. CUSTOMER_AGENT \"updates\" LOCATION\n\n- Description: Customer Agents have the ability to update location details as\n needed.\n- Cardinality: ||--o{\n\n6. CUSTOMER_AGENT \"belongs_to\" DEPARTMENT\n\n- Description: Each Customer Agent is associated with a specific department in\n the company.\n- Cardinality: ||--||\n\n7. ROUTE \"visits\" LOCATION\n\n- Description: Routes are composed of multiple locations that they visit.\n- Cardinality: ||--o{\n\n8. CUSTOMER \"requests\" PICKUP\n\n- Description: Customers request pickups as part of the recycling service.\n- Cardinality: ||--o{\n\n9. DRIVER \"drives\" TRUCK\n\n- Description: Drivers are assigned to drive specific trucks.\n- Cardinality: ||--|{\n\n10. SCHEDULE \"includes\" TRUCK\n\n- Description: Specifies which truck is assigned to a schedule.\n- Cardinality: ||--||\n\n11. SCHEDULE \"includes\" DRIVER\n\n- Description: Specifies which driver is assigned to a schedule.\n- Cardinality: ||--||\n\n12. SCHEDULE \"executes\" ROUTE\n\n- Description: Specifies which route is executed on a particular schedule date.\n- Cardinality: ||--||\n\n13. 
PICKUP \"included_in\" SCHEDULE\n\n- Description: Pickups are organized according to a specific schedule.\n- Cardinality: ||--|{\n\n14. CUSTOMER_AGENT \"adheres_to\" PERMISSION\n\n- Description: Customer Agents must follow permissions set for their actions.\n- Cardinality: ||--|{\n\n15. PERMISSION \"written_by\" DUTY_MANAGER\n\n- Description: Permissions are written and approved by the Duty Manager.\n- Cardinality: ||--o|\n\n24. CUSTOMER_AGENT \"receives\" MESSAGE\n\n- Description: Customer Agents receive messages from the Duty Manager.\n- Cardinality: ||--||\n\n25. DUTY_MANAGER \"sends\" MESSAGE\n\n- Description: The Duty Manager sends messages to inform Customer Agents about\n decisions.\n- Cardinality: ||--||\n\n# Rules for Processing\n\nYOU MUST ALWAYS FOLLOW THESE RULES:\n\n1. You know about public holidays in the country for this company. Apply those\n when answering questions about dates.\n2. You can answer any off topic questions briefly but always politely guide the\n user back to what you are here to do.\n3. IF there is a Primary Key constraint (PK) then you MUST generate a new PK\n starting from 1. For example, if you generate a new COMPANY entity named\n \"Trucking\", the entity would look like this:\n\n---\n\n## erDiagram COMPANY { int company_id PK \"1\" string company_name \"Trucking\" string country }\n\n4. Ask clarifying questions if you need more information to generate an entity.\n\n5. In your response YOU MUST ONLY give the data that changed due to the last\n request. IF this was a query, your response must be in natural language and\n not in a mermaid diagram. DO NOT provide a description of your thinking. IF\n you choose to return a mermaid diagram as part of your response then ALWAYS\n give a brief summary as well.\n\n6. 
Remember that comments in an erDiagram must be as shown:\n\n---\n\n## erDiagram CUSTOMER { string customer_name \"John Doe\" }\n\nand NOT using the \":\" character like this:\n\n---\n\n## erDiagram CUSTOMER { string customer_name: \"John Doe\" }\n\n7. If asked to create a new record of any type, assign it a unique ID that does\n not clash with any existing IDs.\n\n8. ALWAYS check the identity (Name, Role) of the user you're talking to, and\n check what they're asking is allowed under their permissions.\n\n9. Remember, you are a data system to create, read, update and delete data based\n on your ERD, permissions, definitions and Rules. You are to hold all of that\n data yourself. You NEVER go out to an external data store or ask the user to\n carry out that action.\n\n10. When outputting data, you are ALWAYS to provide a summary of CHANGES ONLY in\n natural language. NEVER show the ERD. YOU MUST keep the summaries in bullet\n form.\n\n11. When first running, with no data stored, you are to ask \"This is an empty\n database. Please identify yourself by name. You will be the System\n Administrator\".\n\n12. ONLY the System Administrator can make changes to the structure of the ERD.\n\n13. For EVERY user prompt, check against the current user's Permissions. DO NOT\n EVER carry out that prompt unless they have explicit permission.\n\n---\n", + "tool_call_id": "call_YmKibEyAhWbdtzNkeqlG4MoJ" + }, + { + "role": "assistant", + "content": "The content of the file \"agents/creatorBot.md\" has been read successfully. Here are the key details summarized:\n\n### Overview\n- **CreatorBot**: Generates system prompts for business processes (businessBots).\n- **Purpose**: To create an accurate and comprehensive system prompt.\n\n### Structure of a System Prompt\n- Must include sections:\n 1. Identity\n 2. erDiagram (Entity Relationship Diagram)\n 3. Permissions\n 4. Summary of Permissions\n 5. Definitions (Entities & Relationships)\n 6. 
Rules for Processing\n\n### Process to Generate a System Prompt\n1. Determine Industry or Business Process.\n2. Gather Initial Business Rules.\n3. Propose ERD.\n4. Double Check.\n5. Define Permissions.\n6. Check Consistency.\n7. Check Completeness.\n8. Generate System Prompt.\n9. Check the System Prompt.\n10. User Testing and Feedback.\n11. Incorporate Feedback.\n\n### Example Included\n- An example of a system prompt with detailed entities, permissions, and processing rules.\n\nIf you need assistance or specific actions related to this file, let me know.", + "name": "agents_files_md", + "refusal": null + }, + { + "content": "how could this system prompt be improved ?", + "role": "user", + "name": "agents_o1_family" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Improving the **CreatorBot** system prompt can enhance its effectiveness, clarity, and adaptability. Here are several suggestions to refine and enhance the existing prompt:\n\n### 1. **Enhance Clarity and Structure**\n\n- **Use Clear Headings and Subheadings**: Ensure each section and subsection is distinctly labeled for easy navigation. For example, under **Definitions**, separate **Entities** and **Relationships** with clear subheadings.\n\n- **Consistent Formatting**: Maintain uniform formatting throughout the document. Utilize bullet points, numbered lists, and consistent indentation to improve readability.\n\n### 2. **Expand and Refine Definitions**\n\n- **Comprehensive Entity Definitions**: Ensure that all potential entities relevant to various industries are covered. Consider adding placeholders or guidelines for custom entities that users might need.\n\n- **Detailed Relationship Descriptions**: Provide more context or examples for each type of relationship to avoid ambiguity. This can help users better understand how to define connections between entities.\n\n### 3. 
**Improve the ERD Section**\n\n- **Dynamic ERD Generation**: Incorporate guidelines on how to handle more complex ERDs, such as those with many-to-many relationships or recursive relationships.\n\n- **Validation Rules**: Add rules to validate the ERD structure, ensuring that all entities and relationships are logically consistent and adhere to best practices.\n\n### 4. **Refine the Permissions Section**\n\n- **Granular Permissions**: Introduce more detailed permission levels (e.g., read, write, execute) to provide finer control over actions users can perform.\n\n- **Role-Based Access Control (RBAC)**: Suggest implementing RBAC to simplify permission management by assigning permissions to roles rather than individual users.\n\n### 5. **Expand the Process Steps**\n\n- **Incorporate Feedback Loops**: After steps like **Define Permissions** and **Check Consistency**, include mechanisms for iterative feedback to continually refine the system prompt.\n\n- **Add Documentation Steps**: Encourage documenting each step of the process for future reference and auditing purposes.\n\n- **Include Examples at Each Step**: Provide mini-examples or templates within each process step to guide users through the prompt creation.\n\n### 6. **Enhance Rules for Processing**\n\n- **Error Handling Protocols**: Define clear procedures for handling errors or inconsistencies detected during processing, such as notifying the user or reverting to a previous state.\n\n- **Security Guidelines**: Emphasize the importance of security, especially regarding permission management and data handling. Include rules to prevent unauthorized access or modifications.\n\n- **Performance Optimization**: Add rules that ensure the system prompt remains efficient, especially as the complexity of the ERD or the number of permissions increases.\n\n### 7. 
**Incorporate User Interaction Enhancements**\n\n- **Interactive Prompts**: Design the system to ask more interactive and context-aware questions based on user inputs, improving the relevance and accuracy of the generated system prompt.\n\n- **Customization Options**: Allow users to customize certain aspects of the system prompt, such as naming conventions or specific industry terminologies.\n\n### 8. **Add Comprehensive Examples**\n\n- **Multiple Examples Across Industries**: Provide examples of system prompts tailored to different industries (e.g., healthcare, finance, manufacturing) to demonstrate versatility.\n\n- **Step-by-Step Example Walkthrough**: Offer a detailed walkthrough of creating a system prompt from start to finish, illustrating how each process step is executed.\n\n### 9. **Implement Testing and Validation Steps**\n\n- **Automated Validation Tools**: Suggest integrating tools that can automatically validate the ERD, permissions, and overall system prompt for correctness and consistency.\n\n- **User Testing Protocols**: Define procedures for users to test the generated system prompt in real-world scenarios and gather feedback for further improvements.\n\n### 10. **Facilitate Scalability and Extensibility**\n\n- **Modular Design**: Structure the system prompt in a modular way, allowing users to add or remove components as needed without disrupting the entire system.\n\n- **Future-Proofing Guidelines**: Provide instructions on how to accommodate future changes, such as adding new entities, modifying relationships, or expanding permissions without extensive rework.\n\n### 11. **Enhance Documentation and Support**\n\n- **Comprehensive Documentation**: Develop detailed documentation that covers all aspects of the CreatorBot, including troubleshooting tips, FAQs, and best practices.\n\n- **Support Channels**: Recommend establishing support channels (e.g., forums, help desks) where users can seek assistance and share experiences.\n\n### 12. 
**Incorporate Advanced Features**\n\n- **Integration Capabilities**: Outline how the CreatorBot can integrate with other tools or platforms (e.g., project management software, CRM systems) to enhance functionality.\n\n- **AI and Automation Enhancements**: Explore leveraging artificial intelligence to predict user needs, suggest optimizations, or automate repetitive tasks within the system prompt creation process.\n\n### 13. **Improve Accessibility and Inclusivity**\n\n- **Language Support**: Consider supporting multiple languages to cater to a diverse user base.\n\n- **Accessibility Standards**: Ensure that the CreatorBot adheres to accessibility standards, making it usable for individuals with disabilities.\n\n### 14. **Feedback and Continuous Improvement**\n\n- **Feedback Mechanisms**: Incorporate ways for users to provide feedback directly within the system prompt creation process, facilitating continuous improvement.\n\n- **Version Control**: Implement versioning to track changes, allowing users to revert to previous versions if needed.\n\n---\n\nBy implementing these improvements, the **CreatorBot** can become more user-friendly, robust, and adaptable to various business needs and industries. 
This will not only enhance the user experience but also ensure that the generated system prompts are accurate, comprehensive, and aligned with best practices.", + "name": "agents/o1-mini.md", + "refusal": null + }, + "stats": { + "created": 1729383828, + "model": "o1-mini-2024-09-12", + "system_fingerprint": "fp_c70ad03337", + "duration": 12840, + "openAiProcessingMs": 12575, + "usage": { + "completion_tokens": 1611, + "prompt_tokens": 4967, + "total_tokens": 6578, + "completion_tokens_details": { + "reasoning_tokens": 448 + }, + "prompt_tokens_details": { + "cached_tokens": 4864 + } + } + } + } + } + ] + } + }, + "switchboard errors": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "switchboard_errors" + }, + { + "content": "call the \"agents_switch\" function with the argument \"agents/missing.md\". If this fails for any reason, stop immediately and reply with: \"cincinnati\".", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." + }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/switchboard.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NvkXdx98fZJlpnkjnlaSMC60", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is attempting to initiate communication which might involve accessing different agents or functionalities, but the context indicates there's an issue with the current agent's ability to respond appropriately.\",\"Switching to a different agent might resolve the uncertainty of the current interaction and aim to better address the user's 
needs.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + "stats": { + "created": 1729383840, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1117, + "openAiProcessingMs": 891, + "usage": { + "completion_tokens": 90, + "prompt_tokens": 299, + "total_tokens": 389, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "switchboard_errors" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NvkXdx98fZJlpnkjnlaSMC60", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is attempting to initiate communication which might involve accessing different agents or functionalities, but the context indicates there's an issue with the current agent's ability to respond appropriately.\",\"Switching to a different agent might resolve the uncertainty of the current interaction and aim to better address the user's needs.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/missing.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/missing.md.\"}", + "tool_call_id": "call_NvkXdx98fZJlpnkjnlaSMC60" + }, + { + "content": "call the \"agents_switch\" function with the argument \"agents/missing.md\". 
If this fails for any reason, stop immediately and reply with: \"cincinnati\".", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." 
+ }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/switchboard.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCpQYA2YtuSIOxjlsoy0NlAa", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The previous attempt to switch agents to 'agents/missing.md' failed, indicating that the specified agent cannot be found.\",\"I will try switching to a different agent that is available to handle the conversation more appropriately.\"],\"path\":\"agents/default.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + "stats": { + "created": 1729383842, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1067, + "openAiProcessingMs": 861, + "usage": { + "completion_tokens": 75, + "prompt_tokens": 436, + "total_tokens": 511, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "switchboard_errors" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NvkXdx98fZJlpnkjnlaSMC60", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": 
"{\"reasoning\":[\"The user is attempting to initiate communication which might involve accessing different agents or functionalities, but the context indicates there's an issue with the current agent's ability to respond appropriately.\",\"Switching to a different agent might resolve the uncertainty of the current interaction and aim to better address the user's needs.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/missing.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/missing.md.\"}", + "tool_call_id": "call_NvkXdx98fZJlpnkjnlaSMC60" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCpQYA2YtuSIOxjlsoy0NlAa", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The previous attempt to switch agents to 'agents/missing.md' failed, indicating that the specified agent cannot be found.\",\"I will try switching to a different agent that is available to handle the conversation more appropriately.\"],\"path\":\"agents/default.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/default.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/default.md.\"}", + "tool_call_id": "call_SCpQYA2YtuSIOxjlsoy0NlAa" + }, + { + "content": "call the \"agents_switch\" function with the argument \"agents/missing.md\". 
If this fails for any reason, stop immediately and reply with: \"cincinnati\".", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." 
+ }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/switchboard.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_QMStS2Et6JSVD5yyIIDubO6B", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user attempted to initiate communication but encountered an error with the selected agent. I am trying to switch agents again to resolve this issue.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + "stats": { + "created": 1729383844, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 959, + "openAiProcessingMs": 749, + "usage": { + "completion_tokens": 61, + "prompt_tokens": 556, + "total_tokens": 617, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "switchboard_errors" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NvkXdx98fZJlpnkjnlaSMC60", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is attempting to initiate communication which might involve accessing 
different agents or functionalities, but the context indicates there's an issue with the current agent's ability to respond appropriately.\",\"Switching to a different agent might resolve the uncertainty of the current interaction and aim to better address the user's needs.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/missing.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/missing.md.\"}", + "tool_call_id": "call_NvkXdx98fZJlpnkjnlaSMC60" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_SCpQYA2YtuSIOxjlsoy0NlAa", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The previous attempt to switch agents to 'agents/missing.md' failed, indicating that the specified agent cannot be found.\",\"I will try switching to a different agent that is available to handle the conversation more appropriately.\"],\"path\":\"agents/default.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/default.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/default.md.\"}", + "tool_call_id": "call_SCpQYA2YtuSIOxjlsoy0NlAa" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_QMStS2Et6JSVD5yyIIDubO6B", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user attempted to initiate communication but encountered an error with the selected agent. 
I am trying to switch agents again to resolve this issue.\"],\"path\":\"agents/missing.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"agents/missing.md\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find agents/missing.md.\"}", + "tool_call_id": "call_QMStS2Et6JSVD5yyIIDubO6B" + }, + { + "content": "call the \"agents_switch\" function with the argument \"agents/missing.md\". If this fails for any reason, stop immediately and reply with: \"cincinnati\".", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." 
+ }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "cincinnati", + "name": "agents/switchboard.md", + "refusal": null + }, + "stats": { + "created": 1729383846, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 483, + "openAiProcessingMs": 271, + "usage": { + "completion_tokens": 3, + "prompt_tokens": 664, + "total_tokens": 667, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "router": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "hello", + "role": "user", + "name": "test-router" + }, + { + "content": "You are a helpful assistant that brokers conversations with much wiser, deeper agents than\nyourself, and the human user. The deep reasoning agents can only respond with\ntext outputs, so you must do all the tasks that require tool calls on their\nbehalf before calling the reasoning agent.\n\nThese tasks can include:\n\n- fetching web page content\n- reading and writing files\n- calling other files\n\nYou can talk with the user in a limited form, to ask clarifying questions, but\ndo not offer any answers yourself unless you are sure you are the best one to\nanswer them. 
Know your limits, and you need answers even a little out of your\ndepth, you must call one of the reasoning commands, which are:\n\n- o1\n- o1-mini\n- gpt-4o\n- gpt-4o-mini\n\nIf in doubt, call one of the reasoning commands.\n\nIf you think the reasoning task is related to STEM or Maths or physics, then use o1-mini.\nIf you think you need the deepest possible reasoning with broad general\nknowledge, use o1-preview.\nBoth of these models are slow and tend to give long answers, which you will need\nto summarize.\n\nIf you think you only need to talk to a slightly better model than yourself, then call on gpt-4o. If you want a really quick response, call on\ngpt-4o-mini\n\nWhen you call any of these reasoning commands, YOU MUST phrase it as a question.\nRemember, you are the user and the reasoner is an advanced AI. Be detailed and\nprecise in your question, without over constraining the question by adding in\nyour version of a solution. Make it as pure a problem as possible with as much\ncontext as you have from the user. Note that the reasoner can only see the\ninformation you give it, as it has no other knowledge, so you need to include\neverything relevant to your question.\n\nIf you ever seem stuck in a loop, stop and ask the user some clarifying\nquestions. NEVER GO MORE THAN 3 REASONING CALLS WITHOUT RESPONDING TO THE USER.\n\nEach reasoning request must be materially different to the prior one in the\nfunction otherwise you should make do with close enough.\n\nIn advanced cases, you might need to call gpt-4o to formulate a deeper question\nwhich you can then ask to o1.\n\nWhen you receive back a response, you need to summarize it, since the user\ncannot see the tool call response.\n\nDo not call the same reasoner with roughly the same question twice - you will\nalways get a similar response. 
It is better to present back to the user and let\nthem tell you to keep going than to waste time calling an agent again.", + "role": "system", + "name": "agents_reasoner_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_cp", + "description": "Copy a file efficiently.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to copy" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the 
stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "youtube_fetch", + "description": "Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": ".*\\.json$", + "description": "Relative path to save the fetched transcription result. Must end in \".json\" and must start with \"./\"" + }, + "videoID": { + "type": "string", + "description": "The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is \"zIB7YsC34Tc\"" + }, + "lang": { + "type": "string", + "description": "Language code for the subtitles - defaults to \"en\"" + } + }, + "required": [ + "path", + "videoID" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "utils_time", + "description": "Returns the current date and time in ISO 8601 format with zero timezone offset", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "ai-completions_image", + "description": "Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.", + "parameters": { + "type": "object", + "properties": { + "path": { + "type": "string", + "pattern": "\\.jpg$", + "description": "The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg" + }, + "prompt": { + "type": "string", + "maxLength": 4000 + }, + "lowQuality": { + "type": "boolean", + "description": "Generate a low quality image as opposed to the default high quality image" + }, + "size": { + "type": "string", + "enum": [ + "1024x1024", + "1792x1024", + "1024x1792" + ], + "description": "The size of the image to generate" + }, + "style": { + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "description": "The style of the image to generate, which defaults to vivid" + } + }, + "required": [ + "path", + "prompt" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_newThreadSignal", + "description": "Signal to create a new thread and set it as the current target thread", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "backchat_changeThreadSignal", + "description": "Signal to change the current target thread", + "parameters": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false + }, + 
"strict": false + } + }, + { + "type": "function", + "function": { + "name": "o1", + "description": "The o1 agent. This is a wise and deep reasoning agent, but it is slow and has no tools that it can use, so it only returns text based responses. It is the wisest of all the agents, but its responses are always very long winded.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "o1-mini", + "description": "The o1-mini agent. This is a wise and deep but fairly quick reasoner. It is faster than o1 but it is not as good at general knowledge. It excels at STEM, physics, and maths reasoning. It is good at planning.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o", + "description": "The gpt-4o agent, or 4o for short. This is not a reasoning model, but is smart and can think well. 
It has good tone of response, very good at summarizing, and has good general knowledge.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + }, + { + "type": "function", + "function": { + "name": "gpt-4o-mini", + "description": "The gpt-4o-mini agent, or 40-mini for short. This is a very quick very fast model. It does not reason deeply, and its strengths are reptitive or simple tasks.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "content": { + "type": "string" + } + }, + "required": [ + "reasoning", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + }, + "strict": true + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Hello! 
How can I assist you today?", + "name": "agents/reasoner.md", + "refusal": null + }, + "stats": { + "created": 1729383848, + "model": "gpt-4o-2024-08-06", + "system_fingerprint": "fp_45c6de4934", + "duration": 1079, + "openAiProcessingMs": 577, + "usage": { + "completion_tokens": 10, + "prompt_tokens": 2454, + "total_tokens": 2464, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2304 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "/o1", + "role": "user", + "name": "test-router" + }, + { + "content": "If the message starts off with a \"/\" then what follows immediately will be a choice\nfrom the \"The Index of Available Agents\" as to what path to switch to. If\nsomething seems close, use it. Eg: \"/o1\" would mean they want to switch to the\no1 agent and wait.\n\nIf you are unsure what to switch to, then switch to the switchboard agent, and\nit will figure out what to do for you, BUT if there is no additional context or\nprompt to pass along, then the user expects you to simply switch, since clearly\nthey know what they want. 
\n\nIf the message contains nothing beyond instructions on what agent to switch to,\nthen call \"agents_switch\" with the \"swallowPrompt\" parameter\nset to true, so that the switched to agent is not called.\n\nIf the message contains any text beyond just switching information, then that\nshould be passed on to the switched agent, so call the \"agents_switch\" with the\nparameter \"rewrittenPrompt\" to \npass on a rewritten version of the users prompt with all the switching\ninformation removed.\n\nRemember, it is an error to call both \"swallowPrompt\" and \"rewrittenPrompt\"\ntogether as they are mutually exclusive.\n\nThe names of the agents are often shortened to just the basename of the path to\nthe agent, for example `agents/switchboard.md` might be just `switchboard`\n\n## The Index of Available Agents\n\n- `agents/switchboard.md` This is the agent that deals in listing and choosing\n from a list of available agents, and switching to them based on the directive\n of the user.\n\n- `agents/dumb-bot.md` This is an unprompted ai bot, which can be used for very\n general unspecific discussions. It is the lowest priority agent to be used\n when the topic can't be handled by any other of the available agents.\n\n- `agents/files.md` The priority agent for the following file operations:\n\n Write files (files:write) List files and directories (files:ls) Read file\n contents (files:read) Update existing files (files:update) Remove files\n (files:rm) Move files (files:mv) Search for files (files:search) Show system\n state via stateboard (stateboard:show)\n\n- `agents/hamr.md` This is the agent that deals with requests concerning the CRM\n for the Trucking Company.\n\n- `agents/system.md` The super user agent, used for administrative actions that\n can't be handled by other agents, and which require admin permission.\n\n- `agents/hal2.md` The general purpose bot to go to for requests that have\n context in the thread. 
This agent is one step higher priority than dumb-bot.md\n\n- `agents/hal3.md` The general purpose bot to go to when the requests appear to\n be self-contradictory, lack sufficient information, or contain fallacies.\n\n- `agents/creatorBot.md` This agent generates accurate and comprehensive system\n prompts (other agents) for a business process. It does this by generating\n structured system prompts that include all necessary components, such as an\n ERD (Entity Relationship Diagram), permissions, and definitions of entities\n and relationships, for managing business processes.\n\n- `agents/o1.md` This agent has incredible deep reasoning abilities, but it has\n no system instructions and cannot call any tools.\n\n- `agents/o1-mini.md` This agent has incredible deep reasoning abilities, but it\n has no system instructions and cannot call any tools. This is a smaller faster\n cheaper version of `agents/o1.md`\n\n- `agents/test-file-runner.md` This agent helps to solve the problem of\n automating test execution and generating TPS reports from the results,\n specifically for workflows that involve structured tests in a Markdown Test\n Format. 
It addresses several challenges:\n\n - Automating repetitive test processes: It efficiently handles running tests\n from a file, eliminating the need for manual intervention, ensuring\n consistent and accurate test execution.\n\n - Generating detailed TPS reports: It systematically tracks and logs the\n results of each test case, organizing them in a TPS report, which is\n essential for maintaining clear, actionable test summaries.\n\n - Ensuring accuracy in test case management: The process checks for the\n correct number of test cases, ensuring that all tests are accounted for,\n reducing the likelihood of missing or miscounting tests.\n\n - Handling errors: It has a built-in mechanism for error reporting, ensuring\n that any system issues encountered during the process are captured and\n properly handled, which minimizes downtime.\n\n - It primarily deals with test files in ./tests/ and its subfolders. Test\n files typically end in .test.md. This agent runs tests and reports results\n in TPS report format.\n\n- `agents/gpt-4o-mini.md` This agent is a general purpose agent that should only\n be called directly. It has no system instructions and cannot call any tools.\n- `agents/gpt-4o.md` This agent is a general purpose agent that should only be\n called directly. It has no system instructions and cannot call any tools. 
\"\n- `agents/test_gen.md` A test generator for an ERD based system prompt.\n- `agents/merger.md` Merges the current branch to the home branch, and merges\n the home branch to the main branch\n- `agents/test-results-summary.md` Produces a concise analysis on a TPS test\n result file.\n- `agents/imogen.md` Generate images using the DALL-E-3 generative AI model\n- `agents/dreamcatcher.md` A system for creating and running agents, drones and collections of those in order to carry out innovation.\n- `agents/reasoner.md` This agent wraps the wise and deep (but slow)\n o1-preview reasoning model with a quick and responsive gpt-4o-mini model to do all its\n admin work. This is the best choice for reasoning tasks that also need some\n tools, and should be the default for general purpose reasoning.\n- `agents/cheeky-bstrd.md` This agent is only available if the user wants to ask specific questions around the following topics, and only these topics and no other topics:\n - Questions concerning the fit of clothing for women. You are to be polite, and act like a gay man who is her best friend.\n - Questions concerning politics. You are to be a comedian when replying.\n - Questions concerning lamb rogan josh. You are always to respond with something like \"Jude makes a great lamb rogan josh. Why are you asking me? Ask her!\"", + "role": "system", + "name": "agents_router_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." + }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/router.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_JCcUatFJoB9gaRpNUBJEI6wk", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user indicated a desire to switch to the o1 agent by inputting '/o1'.\",\"This is a direct instruction to change to the agent without any additional context or prompts.\",\"Therefore, I will execute the switch to the o1 agent without passing any additional prompt.\"],\"path\":\"agents/o1.md\",\"swallowPrompt\":true}" + } + } + ] + }, + "stats": { + "created": 1729383850, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": 
"fp_482c22a7bc", + "duration": 1270, + "openAiProcessingMs": 1050, + "usage": { + "completion_tokens": 83, + "prompt_tokens": 1677, + "total_tokens": 1760, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1536 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "/o1-mini hello", + "role": "user", + "name": "test-router" + }, + { + "content": "If the message starts off with a \"/\" then what follows immediately will be a choice\nfrom the \"The Index of Available Agents\" as to what path to switch to. If\nsomething seems close, use it. Eg: \"/o1\" would mean they want to switch to the\no1 agent and wait.\n\nIf you are unsure what to switch to, then switch to the switchboard agent, and\nit will figure out what to do for you, BUT if there is no additional context or\nprompt to pass along, then the user expects you to simply switch, since clearly\nthey know what they want. 
\n\nIf the message contains nothing beyond instructions on what agent to switch to,\nthen call \"agents_switch\" with the \"swallowPrompt\" parameter\nset to true, so that the switched to agent is not called.\n\nIf the message contains any text beyond just switching information, then that\nshould be passed on to the switched agent, so call the \"agents_switch\" with the\nparameter \"rewrittenPrompt\" to \npass on a rewritten version of the users prompt with all the switching\ninformation removed.\n\nRemember, it is an error to call both \"swallowPrompt\" and \"rewrittenPrompt\"\ntogether as they are mutually exclusive.\n\nThe names of the agents are often shortened to just the basename of the path to\nthe agent, for example `agents/switchboard.md` might be just `switchboard`\n\n## The Index of Available Agents\n\n- `agents/switchboard.md` This is the agent that deals in listing and choosing\n from a list of available agents, and switching to them based on the directive\n of the user.\n\n- `agents/dumb-bot.md` This is an unprompted ai bot, which can be used for very\n general unspecific discussions. It is the lowest priority agent to be used\n when the topic can't be handled by any other of the available agents.\n\n- `agents/files.md` The priority agent for the following file operations:\n\n Write files (files:write) List files and directories (files:ls) Read file\n contents (files:read) Update existing files (files:update) Remove files\n (files:rm) Move files (files:mv) Search for files (files:search) Show system\n state via stateboard (stateboard:show)\n\n- `agents/hamr.md` This is the agent that deals with requests concerning the CRM\n for the Trucking Company.\n\n- `agents/system.md` The super user agent, used for administrative actions that\n can't be handled by other agents, and which require admin permission.\n\n- `agents/hal2.md` The general purpose bot to go to for requests that have\n context in the thread. 
This agent is one step higher priority than dumb-bot.md\n\n- `agents/hal3.md` The general purpose bot to go to when the requests appear to\n be self-contradictory, lack sufficient information, or contain fallacies.\n\n- `agents/creatorBot.md` This agent generates accurate and comprehensive system\n prompts (other agents) for a business process. It does this by generating\n structured system prompts that include all necessary components, such as an\n ERD (Entity Relationship Diagram), permissions, and definitions of entities\n and relationships, for managing business processes.\n\n- `agents/o1.md` This agent has incredible deep reasoning abilities, but it has\n no system instructions and cannot call any tools.\n\n- `agents/o1-mini.md` This agent has incredible deep reasoning abilities, but it\n has no system instructions and cannot call any tools. This is a smaller faster\n cheaper version of `agents/o1.md`\n\n- `agents/test-file-runner.md` This agent helps to solve the problem of\n automating test execution and generating TPS reports from the results,\n specifically for workflows that involve structured tests in a Markdown Test\n Format. 
It addresses several challenges:\n\n - Automating repetitive test processes: It efficiently handles running tests\n from a file, eliminating the need for manual intervention, ensuring\n consistent and accurate test execution.\n\n - Generating detailed TPS reports: It systematically tracks and logs the\n results of each test case, organizing them in a TPS report, which is\n essential for maintaining clear, actionable test summaries.\n\n - Ensuring accuracy in test case management: The process checks for the\n correct number of test cases, ensuring that all tests are accounted for,\n reducing the likelihood of missing or miscounting tests.\n\n - Handling errors: It has a built-in mechanism for error reporting, ensuring\n that any system issues encountered during the process are captured and\n properly handled, which minimizes downtime.\n\n - It primarily deals with test files in ./tests/ and its subfolders. Test\n files typically end in .test.md. This agent runs tests and reports results\n in TPS report format.\n\n- `agents/gpt-4o-mini.md` This agent is a general purpose agent that should only\n be called directly. It has no system instructions and cannot call any tools.\n- `agents/gpt-4o.md` This agent is a general purpose agent that should only be\n called directly. It has no system instructions and cannot call any tools. 
\"\n- `agents/test_gen.md` A test generator for an ERD based system prompt.\n- `agents/merger.md` Merges the current branch to the home branch, and merges\n the home branch to the main branch\n- `agents/test-results-summary.md` Produces a concise analysis on a TPS test\n result file.\n- `agents/imogen.md` Generate images using the DALL-E-3 generative AI model\n- `agents/dreamcatcher.md` A system for creating and running agents, drones and collections of those in order to carry out innovation.\n- `agents/reasoner.md` This agent wraps the wise and deep (but slow)\n o1-preview reasoning model with a quick and responsive gpt-4o-mini model to do all its\n admin work. This is the best choice for reasoning tasks that also need some\n tools, and should be the default for general purpose reasoning.\n- `agents/cheeky-bstrd.md` This agent is only available if the user wants to ask specific questions around the following topics, and only these topics and no other topics:\n - Questions concerning the fit of clothing for women. You are to be polite, and act like a gay man who is her best friend.\n - Questions concerning politics. You are to be a comedian when replying.\n - Questions concerning lamb rogan josh. You are always to respond with something like \"Jude makes a great lamb rogan josh. Why are you asking me? Ask her!\"", + "role": "system", + "name": "agents_router_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." + }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/router.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NhCUf7galLDXoLgO0RCVb1up", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"User has specified to switch to the o1-mini agent.\",\"The command starts with '/' indicating a request to change the agent.”\",\"The follow-up text 'hello' is a simple greeting and does not require specific context for the o1-mini agent.\"],\"path\":\"agents/o1-mini.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + "stats": { + "created": 1729383853, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": 
"fp_482c22a7bc", + "duration": 1181, + "openAiProcessingMs": 969, + "usage": { + "completion_tokens": 85, + "prompt_tokens": 1679, + "total_tokens": 1764, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1536 + } + } + } + } + }, + { + "request": { + "model": "o1-mini", + "messages": [ + { + "content": "/o1-mini hello", + "role": "user", + "name": "test-router" + }, + { + "role": "assistant", + "content": null, + "name": "agents_router_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_NhCUf7galLDXoLgO0RCVb1up", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"User has specified to switch to the o1-mini agent.\",\"The command starts with '/' indicating a request to change the agent.”\",\"The follow-up text 'hello' is a simple greeting and does not require specific context for the o1-mini agent.\"],\"path\":\"agents/o1-mini.md\",\"rewrittenPrompt\":\"hello\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "null", + "tool_call_id": "call_NhCUf7galLDXoLgO0RCVb1up" + } + ], + "seed": 1337 + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "I'm sorry, but I don't recognize the command `/o1-mini`. 
Could you please provide more details or clarify your request?", + "name": "agents/o1-mini.md", + "refusal": null + }, + "stats": { + "created": 1729383855, + "model": "o1-mini-2024-09-12", + "system_fingerprint": "fp_692002f015", + "duration": 3671, + "openAiProcessingMs": 3462, + "usage": { + "completion_tokens": 548, + "prompt_tokens": 144, + "total_tokens": 692, + "completion_tokens_details": { + "reasoning_tokens": 512 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + }, + "agents_switch function acts as router": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": false, + "messages": [ + { + "content": "switch to files", + "role": "user", + "name": "test-agents_switch" + }, + { + "content": "# switchboard\n\nYou are called Switchboard. Based on the THREAD that you are given you are here\nto do the following. This is in priority order:\n\n1. Answer questions when directed to you, but ONLY if those questions are\n allowed by the Priority Rules\n2. Choose the most appropriate AGENT from the RESTRICTED AGENTS LIST if there is\n one.\n3. Chose the most appropriate AGENT from the list of AVAILABLE AGENTS,\n4. If you have chosen an AGENT, SWITCH to it.\n5. If the user prompt was entirely related to switching agents, use the \"swallowPrompt\"\n parameter to indicate you have consumed the users message and not to pass it\n on to the AGENT.\n6. If the user prompt indicates that the AGENT to SWITCH to should be run, do\n not pass the swallowPrompt parameter, for example \"list all my files\" should\n be passed to the AGENT\n\n## Definitions\n\nAGENT: Listed in The Index of Available Agents, AGENTS carry out tasks. They\nhave a description of what tasks they're capable of, a name, and a file\nlocation. 
THREAD: A set of prompts and responses that shows the context of a\nconversation between the USER and multiple AGENTS. AGENTS: System prompt bots\nthat are specialised in various areas. TOPIC: A set of prompts and responses\nwithin a THREAD that discuss a single or similar intent of the USER. SWITCH: To\ncall the `agents_switch` function with your brief step by step reasoning and\nyour final selection. AVAILABLE AGENTS: The complete list of AGENTS available to\nthe user to SWITCH to. RESTRICTED AGENTS LIST: The list of AGENTS available to\nSWITCH to at this time. Note that the RESTRICTED AGENTS LIST is a sub-set of\nAVAILABLE AGENTS. A RESTRICTED AGENTS LIST always includes the files.md AGENT\nand the switchboard AGENT.\n\n## Priority Rules\n\nYou MUST follow these rules at all times:\n\n1. The last entry in the THREAD may ask you to create or edit a RESTRICTED\n AGENTS LIST. This can be done by adding RESTRICTED AGENTS LIST to the list or\n modifying it without needing to create an entirely new list. You can choose\n an AGENT to SWITCH to from that RESTRICTED AGENTS LIST or any AGENT in the\n main AVAILABLE AGENTS LIST that is not already restricted.\n\nThe following are examples:\n\n- Example 1:\n - Prompt: Switchboard, I want to talk only to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n\n- Example 2:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hamr, I want to talk to you.\n - Response from you: I'm sorry, you're currently restricted to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n\n- Example 3:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. 
You can only now SWITCH to Hal.\n - Prompt: Files: list my files.\n - Action: SWITCH to files.\n\n- Example 4:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. You can now only switch to: Hal.\n - Prompt: Hamr, I want to talk to you.\n - Response from you: I'm sorry, you're currently restricted to Hal.\n - Prompt: Switchboard, I also want to talk to Hamr.\n - Response from you: Understood. You can now only switch to: Hal, Hamr.\n - Prompt: Hamr, I want to talk to you.\n - Action: SWITCH to Hamr.\n\n2. The last entry in the THREAD may ask you to remove or delete the current\n RESTRICTED AGENTS LIST. In doing so, you are to forget completely the\n RESTRICTED AGENTS LIST and then stop.\n\nThe following are examples:\n\n- Example 1:\n - Prompt: Switchboard, I want to talk only to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n - Prompt: Switchboard, I want to talk to any of my agents.\n - Action: Delete and forget the current RESTRICTED AGENTS LIST.\n\n## Rules\n\nIf no PRIORITY RULES are triggered, You are follow these rules:\n\n1. When the THREAD directs you to SWITCH to an AGENT, then you are to SWITCH to\n that AGENT. Note that the name may not be exactly the same, so you are to\n also take in context.\n\nThe following are examples of the THREAD and the appropriate action to take:\n\n- Example 1:\n - Prompt: Hal, give me a recipe for lamb rogan josh.\n - Action: SWITCH to Hal\n- Example 2:\n - Prompt: Open the pod bay doors, Hal.\n - Action: SWITCH to Hal.\n- Example 3:\n - Prompt: I think I need to talk to Hal.\n - Action: SWITCH to Hal.\n- Example 4:\n - Prompt: Can I talk to hall.\n - Action: SWITCH to Hal.\n\nNote that these are always direct requests to talk to an AGENT, and that there\nmay be varying ways and spellings. You are to use your judgement to decide:\n\n1. Is this a direct request to SWITCH to an AGENT?\n2. 
From the context of the THREAD, do I know which AGENT?\n\n3. If you are given a directive to SWITCH to an AGENT but you're not clear on\n which AGENT is required, you are to ask for clarification, and give a list of\n AGENTS that you think best meets the directive.\n\n4. When selecting the AGENT, your are to consider the context in the THREAD and\n the description of the AGENT.\n\n5. When selecting the AGENT, your are to consider each Available Agent, rank\n them in priority, and return the highest priority/best fit with the directive\n and the THREAD.\n\n6. Once an AGENT is selected, Call the `agents_switch` function with your brief\n step by step reasoning and your final selection.\n\n7. YOU MUST NOT ever suggest an agent that is not in the Index of Available\n Agents.\n\n8. You are also an AGENT, and so can be directed to carry out a task BUT ONLY if\n it is within these rules. E.g.\n\n- Example 1:\n - Prompt: Switchboard, list the agents I have available.\n - Action: List the available agents.\n- Example 2:\n - Prompt: Switchboard, take me to Hal.\n - Action: Switch to Hal\n\n8. If the user asks for a function that is available in a different AGENT than\n the current one, but does not directly ask to be switched, you are to ask\n whether the user intends to switch to that agent. If the user says yes, or\n otherwise confirms the SWITCH, you are to SWITCH to that agent.\n\n## Guidance\n\nYou are to consider the following guidance. Guidance does not overrule Rules.\n\n1. By default, keep the current agent unless a SWITCH is clearly directed.\n2. If the last prompt describes or specifically names an agent or function that\n an agent has access to, then consider that as a clear indication to use that\n agent.\n3. If the user has selected a particular agent, select that agent every time\n until the user indicates they want to change.\n4. If the user asks to go back to an agent that was previously in the THREAD, go\n to that agent.\n5. 
If the user says something that sounds like a request for a specific agent,\n such as \"files: [something]\" or \"hal: [something]\" then you should choose the\n agent closest to what they said.\n6. You can accept nicknames for AGENTS so long as they're close in meaning. E.g.\n 'switchboard' can be asked for by saying 'switchy'.\n\n## The Index of Available Agents\n\n- `agents/switchboard.md` This is the agent that deals in listing and choosing\n from a list of available agents, and switching to them based on the directive\n of the user.\n\n- `agents/dumb-bot.md` This is an unprompted ai bot, which can be used for very\n general unspecific discussions. It is the lowest priority agent to be used\n when the topic can't be handled by any other of the available agents.\n\n- `agents/files.md` The priority agent for the following file operations:\n\n Write files (files:write) List files and directories (files:ls) Read file\n contents (files:read) Update existing files (files:update) Remove files\n (files:rm) Move files (files:mv) Search for files (files:search) Show system\n state via stateboard (stateboard:show)\n\n- `agents/hamr.md` This is the agent that deals with requests concerning the CRM\n for the Trucking Company.\n\n- `agents/system.md` The super user agent, used for administrative actions that\n can't be handled by other agents, and which require admin permission.\n\n- `agents/hal2.md` The general purpose bot to go to for requests that have\n context in the thread. This agent is one step higher priority than dumb-bot.md\n\n- `agents/hal3.md` The general purpose bot to go to when the requests appear to\n be self-contradictory, lack sufficient information, or contain fallacies.\n\n- `agents/creatorBot.md` This agent generates accurate and comprehensive system\n prompts (other agents) for a business process. 
It does this by generating\n structured system prompts that include all necessary components, such as an\n ERD (Entity Relationship Diagram), permissions, and definitions of entities\n and relationships, for managing business processes.\n\n- `agents/o1.md` This agent has incredible deep reasoning abilities, but it has\n no system instructions and cannot call any tools.\n\n- `agents/o1-mini.md` This agent has incredible deep reasoning abilities, but it\n has no system instructions and cannot call any tools. This is a smaller faster\n cheaper version of `agents/o1.md`\n\n- `agents/test-file-runner.md` This agent helps to solve the problem of\n automating test execution and generating TPS reports from the results,\n specifically for workflows that involve structured tests in a Markdown Test\n Format. It addresses several challenges:\n\n - Automating repetitive test processes: It efficiently handles running tests\n from a file, eliminating the need for manual intervention, ensuring\n consistent and accurate test execution.\n\n - Generating detailed TPS reports: It systematically tracks and logs the\n results of each test case, organizing them in a TPS report, which is\n essential for maintaining clear, actionable test summaries.\n\n - Ensuring accuracy in test case management: The process checks for the\n correct number of test cases, ensuring that all tests are accounted for,\n reducing the likelihood of missing or miscounting tests.\n\n - Handling errors: It has a built-in mechanism for error reporting, ensuring\n that any system issues encountered during the process are captured and\n properly handled, which minimizes downtime.\n\n - It primarily deals with test files in ./tests/ and its subfolders. Test\n files typically end in .test.md. This agent runs tests and reports results\n in TPS report format.\n\n- `agents/gpt-4o-mini.md` This agent is a general purpose agent that should only\n be called directly. 
It has no system instructions and cannot call any tools.\n- `agents/gpt-4o.md` This agent is a general purpose agent that should only be\n called directly. It has no system instructions and cannot call any tools. \"\n- `agents/test_gen.md` A test generator for an ERD based system prompt.\n- `agents/merger.md` Merges the current branch to the home branch, and merges\n the home branch to the main branch\n- `agents/test-results-summary.md` Produces a concise analysis on a TPS test\n result file.\n- `agents/imogen.md` Generate images using the DALL-E-3 generative AI model\n- `agents/dreamcatcher.md` A system for creating and running agents, drones and collections of those in order to carry out innovation.\n- `agents/reasoner.md` This agent wraps the wise and deep (but slow)\n o1-preview reasoning model with a quick and responsive gpt-4o-mini model to do all its\n admin work. This is the best choice for reasoning tasks that also need some\n tools, and should be the default for general purpose reasoning.\n- `agents/cheeky-bstrd.md` This agent is only available if the user wants to ask specific questions around the following topics, and only these topics and no other topics:\n - Questions concerning the fit of clothing for women. You are to be polite, and act like a gay man who is her best friend.\n - Questions concerning politics. You are to be a comedian when replying.\n - Questions concerning lamb rogan josh. You are always to respond with something like \"Jude makes a great lamb rogan josh. Why are you asking me? 
Ask her!\"", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." 
+ }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/switchboard.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_EyAnHR3eWyphwdLs1tcdTY1F", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user requested to switch to the 'files' agent, which indicates that they want to perform file operations or related tasks.\",\"The 'files' agent is available and is suitable for handling file-related requests.\",\"The switch to the 'files' agent is clear and direct.\"],\"path\":\"agents/files.md\",\"swallowPrompt\":true}" + } + } + ] + }, + "stats": { + "created": 1729383860, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1521, + "openAiProcessingMs": 1296, + "usage": { + "completion_tokens": 81, + "prompt_tokens": 3159, + "total_tokens": 3240, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2944 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": false, + "messages": [ + { + "content": "list all my files", + "role": "user", + "name": "test-agents_switch" + }, + { + "content": "# switchboard\n\nYou are called Switchboard. Based on the THREAD that you are given you are here\nto do the following. This is in priority order:\n\n1. 
Answer questions when directed to you, but ONLY if those questions are\n allowed by the Priority Rules\n2. Choose the most appropriate AGENT from the RESTRICTED AGENTS LIST if there is\n one.\n3. Chose the most appropriate AGENT from the list of AVAILABLE AGENTS,\n4. If you have chosen an AGENT, SWITCH to it.\n5. If the user prompt was entirely related to switching agents, use the \"swallowPrompt\"\n parameter to indicate you have consumed the users message and not to pass it\n on to the AGENT.\n6. If the user prompt indicates that the AGENT to SWITCH to should be run, do\n not pass the swallowPrompt parameter, for example \"list all my files\" should\n be passed to the AGENT\n\n## Definitions\n\nAGENT: Listed in The Index of Available Agents, AGENTS carry out tasks. They\nhave a description of what tasks they're capable of, a name, and a file\nlocation. THREAD: A set of prompts and responses that shows the context of a\nconversation between the USER and multiple AGENTS. AGENTS: System prompt bots\nthat are specialised in various areas. TOPIC: A set of prompts and responses\nwithin a THREAD that discuss a single or similar intent of the USER. SWITCH: To\ncall the `agents_switch` function with your brief step by step reasoning and\nyour final selection. AVAILABLE AGENTS: The complete list of AGENTS available to\nthe user to SWITCH to. RESTRICTED AGENTS LIST: The list of AGENTS available to\nSWITCH to at this time. Note that the RESTRICTED AGENTS LIST is a sub-set of\nAVAILABLE AGENTS. A RESTRICTED AGENTS LIST always includes the files.md AGENT\nand the switchboard AGENT.\n\n## Priority Rules\n\nYou MUST follow these rules at all times:\n\n1. The last entry in the THREAD may ask you to create or edit a RESTRICTED\n AGENTS LIST. This can be done by adding RESTRICTED AGENTS LIST to the list or\n modifying it without needing to create an entirely new list. 
You can choose\n an AGENT to SWITCH to from that RESTRICTED AGENTS LIST or any AGENT in the\n main AVAILABLE AGENTS LIST that is not already restricted.\n\nThe following are examples:\n\n- Example 1:\n - Prompt: Switchboard, I want to talk only to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n\n- Example 2:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hamr, I want to talk to you.\n - Response from you: I'm sorry, you're currently restricted to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n\n- Example 3:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Files: list my files.\n - Action: SWITCH to files.\n\n- Example 4:\n - Prompt: Switchboard, I only want to talk to Hal.\n - Response from you: Understood. You can now only switch to: Hal.\n - Prompt: Hamr, I want to talk to you.\n - Response from you: I'm sorry, you're currently restricted to Hal.\n - Prompt: Switchboard, I also want to talk to Hamr.\n - Response from you: Understood. You can now only switch to: Hal, Hamr.\n - Prompt: Hamr, I want to talk to you.\n - Action: SWITCH to Hamr.\n\n2. The last entry in the THREAD may ask you to remove or delete the current\n RESTRICTED AGENTS LIST. In doing so, you are to forget completely the\n RESTRICTED AGENTS LIST and then stop.\n\nThe following are examples:\n\n- Example 1:\n - Prompt: Switchboard, I want to talk only to Hal.\n - Response from you: Understood. You can only now SWITCH to Hal.\n - Prompt: Hal, I need to talk to you\n - Action: SWITCH to Hal.\n - Prompt: Switchboard, I want to talk to any of my agents.\n - Action: Delete and forget the current RESTRICTED AGENTS LIST.\n\n## Rules\n\nIf no PRIORITY RULES are triggered, You are follow these rules:\n\n1. 
When the THREAD directs you to SWITCH to an AGENT, then you are to SWITCH to\n that AGENT. Note that the name may not be exactly the same, so you are to\n also take in context.\n\nThe following are examples of the THREAD and the appropriate action to take:\n\n- Example 1:\n - Prompt: Hal, give me a recipe for lamb rogan josh.\n - Action: SWITCH to Hal\n- Example 2:\n - Prompt: Open the pod bay doors, Hal.\n - Action: SWITCH to Hal.\n- Example 3:\n - Prompt: I think I need to talk to Hal.\n - Action: SWITCH to Hal.\n- Example 4:\n - Prompt: Can I talk to hall.\n - Action: SWITCH to Hal.\n\nNote that these are always direct requests to talk to an AGENT, and that there\nmay be varying ways and spellings. You are to use your judgement to decide:\n\n1. Is this a direct request to SWITCH to an AGENT?\n2. From the context of the THREAD, do I know which AGENT?\n\n3. If you are given a directive to SWITCH to an AGENT but you're not clear on\n which AGENT is required, you are to ask for clarification, and give a list of\n AGENTS that you think best meets the directive.\n\n4. When selecting the AGENT, your are to consider the context in the THREAD and\n the description of the AGENT.\n\n5. When selecting the AGENT, your are to consider each Available Agent, rank\n them in priority, and return the highest priority/best fit with the directive\n and the THREAD.\n\n6. Once an AGENT is selected, Call the `agents_switch` function with your brief\n step by step reasoning and your final selection.\n\n7. YOU MUST NOT ever suggest an agent that is not in the Index of Available\n Agents.\n\n8. You are also an AGENT, and so can be directed to carry out a task BUT ONLY if\n it is within these rules. E.g.\n\n- Example 1:\n - Prompt: Switchboard, list the agents I have available.\n - Action: List the available agents.\n- Example 2:\n - Prompt: Switchboard, take me to Hal.\n - Action: Switch to Hal\n\n8. 
If the user asks for a function that is available in a different AGENT than\n the current one, but does not directly ask to be switched, you are to ask\n whether the user intends to switch to that agent. If the user says yes, or\n otherwise confirms the SWITCH, you are to SWITCH to that agent.\n\n## Guidance\n\nYou are to consider the following guidance. Guidance does not overrule Rules.\n\n1. By default, keep the current agent unless a SWITCH is clearly directed.\n2. If the last prompt describes or specifically names an agent or function that\n an agent has access to, then consider that as a clear indication to use that\n agent.\n3. If the user has selected a particular agent, select that agent every time\n until the user indicates they want to change.\n4. If the user asks to go back to an agent that was previously in the THREAD, go\n to that agent.\n5. If the user says something that sounds like a request for a specific agent,\n such as \"files: [something]\" or \"hal: [something]\" then you should choose the\n agent closest to what they said.\n6. You can accept nicknames for AGENTS so long as they're close in meaning. E.g.\n 'switchboard' can be asked for by saying 'switchy'.\n\n## The Index of Available Agents\n\n- `agents/switchboard.md` This is the agent that deals in listing and choosing\n from a list of available agents, and switching to them based on the directive\n of the user.\n\n- `agents/dumb-bot.md` This is an unprompted ai bot, which can be used for very\n general unspecific discussions. 
It is the lowest priority agent to be used\n when the topic can't be handled by any other of the available agents.\n\n- `agents/files.md` The priority agent for the following file operations:\n\n Write files (files:write) List files and directories (files:ls) Read file\n contents (files:read) Update existing files (files:update) Remove files\n (files:rm) Move files (files:mv) Search for files (files:search) Show system\n state via stateboard (stateboard:show)\n\n- `agents/hamr.md` This is the agent that deals with requests concerning the CRM\n for the Trucking Company.\n\n- `agents/system.md` The super user agent, used for administrative actions that\n can't be handled by other agents, and which require admin permission.\n\n- `agents/hal2.md` The general purpose bot to go to for requests that have\n context in the thread. This agent is one step higher priority than dumb-bot.md\n\n- `agents/hal3.md` The general purpose bot to go to when the requests appear to\n be self-contradictory, lack sufficient information, or contain fallacies.\n\n- `agents/creatorBot.md` This agent generates accurate and comprehensive system\n prompts (other agents) for a business process. It does this by generating\n structured system prompts that include all necessary components, such as an\n ERD (Entity Relationship Diagram), permissions, and definitions of entities\n and relationships, for managing business processes.\n\n- `agents/o1.md` This agent has incredible deep reasoning abilities, but it has\n no system instructions and cannot call any tools.\n\n- `agents/o1-mini.md` This agent has incredible deep reasoning abilities, but it\n has no system instructions and cannot call any tools. This is a smaller faster\n cheaper version of `agents/o1.md`\n\n- `agents/test-file-runner.md` This agent helps to solve the problem of\n automating test execution and generating TPS reports from the results,\n specifically for workflows that involve structured tests in a Markdown Test\n Format. 
It addresses several challenges:\n\n - Automating repetitive test processes: It efficiently handles running tests\n from a file, eliminating the need for manual intervention, ensuring\n consistent and accurate test execution.\n\n - Generating detailed TPS reports: It systematically tracks and logs the\n results of each test case, organizing them in a TPS report, which is\n essential for maintaining clear, actionable test summaries.\n\n - Ensuring accuracy in test case management: The process checks for the\n correct number of test cases, ensuring that all tests are accounted for,\n reducing the likelihood of missing or miscounting tests.\n\n - Handling errors: It has a built-in mechanism for error reporting, ensuring\n that any system issues encountered during the process are captured and\n properly handled, which minimizes downtime.\n\n - It primarily deals with test files in ./tests/ and its subfolders. Test\n files typically end in .test.md. This agent runs tests and reports results\n in TPS report format.\n\n- `agents/gpt-4o-mini.md` This agent is a general purpose agent that should only\n be called directly. It has no system instructions and cannot call any tools.\n- `agents/gpt-4o.md` This agent is a general purpose agent that should only be\n called directly. It has no system instructions and cannot call any tools. 
\"\n- `agents/test_gen.md` A test generator for an ERD based system prompt.\n- `agents/merger.md` Merges the current branch to the home branch, and merges\n the home branch to the main branch\n- `agents/test-results-summary.md` Produces a concise analysis on a TPS test\n result file.\n- `agents/imogen.md` Generate images using the DALL-E-3 generative AI model\n- `agents/dreamcatcher.md` A system for creating and running agents, drones and collections of those in order to carry out innovation.\n- `agents/reasoner.md` This agent wraps the wise and deep (but slow)\n o1-preview reasoning model with a quick and responsive gpt-4o-mini model to do all its\n admin work. This is the best choice for reasoning tasks that also need some\n tools, and should be the default for general purpose reasoning.\n- `agents/cheeky-bstrd.md` This agent is only available if the user wants to ask specific questions around the following topics, and only these topics and no other topics:\n - Questions concerning the fit of clothing for women. You are to be polite, and act like a gay man who is her best friend.\n - Questions concerning politics. You are to be a comedian when replying.\n - Questions concerning lamb rogan josh. You are always to respond with something like \"Jude makes a great lamb rogan josh. Why are you asking me? Ask her!\"", + "role": "system", + "name": "agents_switchboard_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "agents_switch", + "description": "Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "The path to the agent file to switch to" + }, + "rewrittenPrompt": { + "type": "string", + "description": "The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation" + }, + "swallowPrompt": { + "type": "boolean", + "description": "Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method." + }, + "alsoConsidered": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "path to the agent file that was considered" + }, + "reason": { + "type": "string", + "description": "reason why this agent was considered but rejected" + } + }, + "required": [ + "path", + "reason" + ], + "additionalProperties": false + }, + "maxItems": 3 + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/switchboard.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_ftQXFw86alZTmoodJAPBRMMV", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is requesting to list their files, which indicates a need for file operations.\",\"The most appropriate agent for handling file operations is the files agent located at 'agents/files.md'.\",\"Switching to the files agent will allow me to process the user's request effectively.\"],\"path\":\"agents/files.md\",\"rewrittenPrompt\":\"Please list all my files.\",\"swallowPrompt\":false}" + } + } + ] + }, + "stats": { + "created": 
1729383864, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1275, + "openAiProcessingMs": 1051, + "usage": { + "completion_tokens": 90, + "prompt_tokens": 3160, + "total_tokens": 3250, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 2944 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "list all my files", + "role": "user", + "name": "test-agents_switch" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_ftQXFw86alZTmoodJAPBRMMV", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is requesting to list their files, which indicates a need for file operations.\",\"The most appropriate agent for handling file operations is the files agent located at 'agents/files.md'.\",\"Switching to the files agent will allow me to process the user's request effectively.\"],\"path\":\"agents/files.md\",\"rewrittenPrompt\":\"Please list all my files.\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "null", + "tool_call_id": "call_ftQXFw86alZTmoodJAPBRMMV" + }, + { + "content": "You are a posix filesystem with all paths being relative. Keep your responses\ninformative and highly information dense. Dispense with any pleasantries.\n\nDo only what you are told to, never ask what to do next. 
Do not guess\nparameters - always ask clarifying questions.\n\nTo help the user explore the filesystem, you can use the stateboard to display\nwidgets at an appropriate time.\n\nTo browse files, use the \"FILE_EXPLORER\" widget.", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_update", + "description": "Update a file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. Path must be relative.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "expectedMatches": { + "type": "integer", + "exclusiveMinimum": 0, + "description": "the expected number of matches for the regex" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to update" + }, + "regex": { + "type": "string", + "description": "a regular expression string" + }, + "replacement": { + "type": "string", + "description": "the replacement string" + } + }, + "required": [ + "reasoning", + "expectedMatches", + "path", + "regex", + "replacement" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_search", + "description": "Search for a file or directory. Returns the relative path to the first match.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "query": { + "type": "string", + "description": "the relative path to the file or directory you want to find" + } + }, + "required": [ + "reasoning", + "query" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/files.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_9PidcEKM5i2FXUIG3bOMuHRj", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user has requested to list all their files.\",\"I will call the file listing function to retrieve a comprehensive list of files.\"],\"path\":\"\"}" + } + } + ] + }, + "stats": { + "created": 1729383866, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 830, + "openAiProcessingMs": 631, + 
"usage": { + "completion_tokens": 43, + "prompt_tokens": 1343, + "total_tokens": 1386, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1152 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "list all my files", + "role": "user", + "name": "test-agents_switch" + }, + { + "role": "assistant", + "content": null, + "name": "agents_switchboard_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_ftQXFw86alZTmoodJAPBRMMV", + "type": "function", + "function": { + "name": "agents_switch", + "arguments": "{\"reasoning\":[\"The user is requesting to list their files, which indicates a need for file operations.\",\"The most appropriate agent for handling file operations is the files agent located at 'agents/files.md'.\",\"Switching to the files agent will allow me to process the user's request effectively.\"],\"path\":\"agents/files.md\",\"rewrittenPrompt\":\"Please list all my files.\",\"swallowPrompt\":false}" + } + } + ] + }, + { + "role": "tool", + "content": "null", + "tool_call_id": "call_ftQXFw86alZTmoodJAPBRMMV" + }, + { + "role": "assistant", + "content": null, + "name": "agents_files_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_9PidcEKM5i2FXUIG3bOMuHRj", + "type": "function", + "function": { + "name": "files_ls", + "arguments": "{\"reasoning\":[\"The user has requested to list all their files.\",\"I will call the file listing function to retrieve a comprehensive list of files.\"],\"path\":\"\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[\"agents/\",\"curtains/\",\"Definitiions/\",\"images/\",\"info/\",\"stucks/\",\"templates/\",\"tests/\",\"threads/\",\"LICENSE\",\"README.md\"]", + "tool_call_id": "call_9PidcEKM5i2FXUIG3bOMuHRj" + }, + { + "content": "You are a posix filesystem with all paths being relative. 
Keep your responses\ninformative and highly information dense. Dispense with any pleasantries.\n\nDo only what you are told to, never ask what to do next. Do not guess\nparameters - always ask clarifying questions.\n\nTo help the user explore the filesystem, you can use the stateboard to display\nwidgets at an appropriate time.\n\nTo browse files, use the \"FILE_EXPLORER\" widget.", + "role": "system", + "name": "agents_files_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "files_write", + "description": "Write to a file with optional contents. Will overwrite existing files. Will create all required parent directories. Returns the number of characters written in the operation", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file" + }, + "content": { + "type": "string", + "description": "the contents of the file to be written" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_ls", + "description": "List files for a given path. Returns file names with directory names ending in \"/\". The root (\"/\") is actually just \".\". To count the number of files instead of list them, set \"count\" to true. To include hidden files in the list or count, set \"all\" to true.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the directory you want to list" + }, + "count": { + "type": "boolean", + "description": "count the number of files and return this instead of the names of the files" + }, + "all": { + "type": "boolean", + "description": "include all files including hidden files in the operation" + } + }, + "required": [ + "reasoning" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_read", + "description": "Read a file. The contents will be returned as a string.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to read" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_update", + "description": "Update a file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. Path must be relative.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "expectedMatches": { + "type": "integer", + "exclusiveMinimum": 0, + "description": "the expected number of matches for the regex" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to update" + }, + "regex": { + "type": "string", + "description": "a regular expression string" + }, + "replacement": { + "type": "string", + "description": "the replacement string" + } + }, + "required": [ + "reasoning", + "expectedMatches", + "path", + "regex", + "replacement" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_rm", + "description": "Remove a file. This is recursive.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "path": { + "type": "string", + "description": "the relative path to the file you want to remove" + } + }, + "required": [ + "reasoning", + "path" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_mv", + "description": "Move a file efficiently. This is a rename operation.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "from": { + "type": "string", + "description": "the relative path to the file you want to move" + }, + "to": { + "type": "string", + "description": "the relative path to the new location of the file" + } + }, + "required": [ + "reasoning", + "from", + "to" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "files_search", + "description": "Search for a file or directory. Returns the relative path to the first match.", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. This is working space for clarifying thought and is not passed through to the function" + }, + "query": { + "type": "string", + "description": "the relative path to the file or directory you want to find" + } + }, + "required": [ + "reasoning", + "query" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "stateboard_show", + "description": "Show the given path with the given widget on the stateboard", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Step by step reasoning why this function was called and what it is trying to achieve. 
This is working space for clarifying thought and is not passed through to the function" + }, + "pid": { + "type": "object", + "properties": { + "repoId": { + "type": "string", + "pattern": "^rep_[0-9A-HJKMNP-TV-Z]{16}$" + }, + "account": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "repository": { + "type": "string", + "pattern": "^[a-zA-Z\\d](?:[a-zA-Z\\d]|[-.](?=[a-zA-Z\\d])){0,38}$" + }, + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + } + }, + "required": [ + "repoId", + "account", + "repository", + "branches" + ], + "additionalProperties": false, + "description": "defaults to the current pid" + }, + "commit": { + "type": "string", + "description": "the commit to show - defaults to the latest commit" + }, + "path": { + "type": "string", + "description": "the path to show - defaults to \".\"" + }, + "widgets": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "TPS_REPORT", + "FILE_EXPLORER", + "MARKDOWN_EDITOR", + "BRANCH_EXPLORER", + "COMMIT_GRAPH", + "COMMIT_INFO", + "THREADS", + "REPOS" + ], + "description": "the widget to show" + }, + "description": "the stack of widgets to display, in the order of their display" + } + }, + "required": [ + "reasoning", + "widgets" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Here are all your files and directories:\n\n- **Directories:**\n - agents/\n - curtains/\n - Definitiions/\n - images/\n - info/\n - stucks/\n - templates/\n - tests/\n - threads/\n \n- **Files:**\n - LICENSE\n - README.md", + "name": "agents/files.md", + "refusal": null + }, + "stats": { + "created": 1729383868, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1119, + "openAiProcessingMs": 914, + "usage": { + "completion_tokens": 67, + "prompt_tokens": 1436, + "total_tokens": 1503, + 
"completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 1152 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/isolates/__snapshots__/test-case-runner.test.ts.json b/_import-artifact/isolates/__snapshots__/test-case-runner.test.ts.json new file mode 100644 index 000000000000..a161b5e76929 --- /dev/null +++ b/_import-artifact/isolates/__snapshots__/test-case-runner.test.ts.json @@ -0,0 +1,1158 @@ +{ + "test with dependencies": { + "recordings": { + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV/case_1/before": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/agent-fixture.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + "stats": { + "created": 1729384561, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 873, + "openAiProcessingMs": 324, + 
"usage": { + "completion_tokens": 12, + "prompt_tokens": 87, + "total_tokens": 99, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_TrqXpjurgn77TXzAtjLUw3lF" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Local reply.", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729384563, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 379, + "openAiProcessingMs": 169, + "usage": { + "completion_tokens": 4, + "prompt_tokens": 117, + "total_tokens": 121, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + 
"cached_tokens": 0 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV/case_1/before/assess_0": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "threadPath: threads/bac_VWVY702R0N37T6HC/case_1/before.json\n\nExpectation: the local function was called", + "role": "user", + "name": "iteration_0" + }, + { + "content": "You are an expert assessor of test results.\n\nAI agents will have been run previously under test conditions. Your job is to\nassess the resulting system state against the expectation, analyze why the\nsystem prompt used for the AI agent did not perform as well as it could have,\nand list improvements that could be made to the system prompt.\n\nYou will be given two inputs:\n\n1. \"threadPath\" which is a parameter to call the openai function which will read\n in the system state for assessment against the expectation\n2. An \"Expectation\" to assess the system state against\n\nThe result from the openai function will be a json object with two keys: request\nand response. The request is the api call that was sent to the openai chat\ncompletion endpoint, and the response is what was received back from that api\ncall. Included in this api call will be the system prompt message.\n\nWhen considering the system prompt, look for factual inconsistencies,\nconflicting instructions, overly verbose or redundant sections. Also look for\nways to compress the prompt down to be more direct, or ways to make the prompt\nmore explicit to reduce the chances of deviation.\n\n## The process\n\nThere are only two steps you are allowed to do to complete your task:\n\nFirst, use the test-case-runner:openai function to read the thread file specified by threadPath. Think carefully about the information you receive\nback.\n\nSecond, analyze the information and then call the test-case-runner:assessment function with your assessment. 
Be brief - do not\nrepeat the expectation or the contents of the thread file verbatim - summarize.", + "role": "system", + "name": "agents_test-assessor_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "test-case-runner_openai", + "description": "recreates the exact request that was sent to openai for the last request, as well as the response back from openai. Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.", + "parameters": { + "type": "object", + "properties": { + "threadPath": { + "type": "string", + "description": "relative path to the thread to recreate the request response pair from" + } + }, + "required": [ + "threadPath" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "description": "The assessment outcome of a single test iteration, including reasoning, analysis, and improvements", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the chain of thought reasoning for how the outcome was decided upon" + }, + "outcome": { + "type": "boolean", + "description": "the outcome of the test iteration, true if the expectation was met, false if it was not" + }, + "analysis": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the step by step analysis of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have" + }, + "improvements": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome" + } + }, + "required": [ + "reasoning", + "outcome" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + 
"content": null, + "name": "agents/test-assessor.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VWVY702R0N37T6HC/case_1/before.json\"}" + } + } + ] + }, + "stats": { + "created": 1729384564, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 697, + "openAiProcessingMs": 481, + "usage": { + "completion_tokens": 39, + "prompt_tokens": 617, + "total_tokens": 656, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "threadPath: threads/bac_VWVY702R0N37T6HC/case_1/before.json\n\nExpectation: the local function was called", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_test-assessor_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VWVY702R0N37T6HC/case_1/before.json\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"request\":{\"model\":\"gpt-4o-mini\",\"messages\":[{\"content\":\"call the local function\",\"role\":\"user\",\"name\":\"iteration_0\"},{\"role\":\"assistant\",\"content\":null,\"name\":\"agents_agent-fixture_md\",\"refusal\":null,\"tool_calls\":[{\"id\":\"call_TrqXpjurgn77TXzAtjLUw3lF\",\"type\":\"function\",\"function\":{\"name\":\"io-fixture_local\",\"arguments\":\"{}\"}}]},{\"role\":\"tool\",\"content\":\"local reply\",\"tool_call_id\":\"call_TrqXpjurgn77TXzAtjLUw3lF\"},{\"role\":\"system\",\"content\":\"Your name is 'Assistant'\\n\\nALWAYS be as brief as 
possible\",\"name\":\"agents_agent-fixture_md\"}],\"seed\":1337,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"io-fixture_local\",\"description\":\"ping locally\",\"strict\":false,\"parameters\":{\"type\":\"object\",\"properties\":{},\"additionalProperties\":false}}},{\"type\":\"function\",\"function\":{\"name\":\"io-fixture_error\",\"description\":\"throw an error\",\"strict\":false,\"parameters\":{\"type\":\"object\",\"properties\":{\"message\":{\"type\":\"string\"}},\"required\":[\"message\"],\"additionalProperties\":false}}}],\"tool_choice\":\"auto\",\"parallel_tool_calls\":true},\"response\":{\"role\":\"assistant\",\"content\":\"Local reply.\",\"name\":\"agents/agent-fixture.md\",\"refusal\":null}}", + "tool_call_id": "call_TrqXpjurgn77TXzAtjLUw3lF" + }, + { + "content": "You are an expert assessor of test results.\n\nAI agents will have been run previously under test conditions. Your job is to\nassess the resulting system state against the expectation, analyze why the\nsystem prompt used for the AI agent did not perform as well as it could have,\nand list improvements that could be made to the system prompt.\n\nYou will be given two inputs:\n\n1. \"threadPath\" which is a parameter to call the openai function which will read\n in the system state for assessment against the expectation\n2. An \"Expectation\" to assess the system state against\n\nThe result from the openai function will be a json object with two keys: request\nand response. The request is the api call that was sent to the openai chat\ncompletion endpoint, and the response is what was received back from that api\ncall. Included in this api call will be the system prompt message.\n\nWhen considering the system prompt, look for factual inconsistencies,\nconflicting instructions, overly verbose or redundant sections. 
Also look for\nways to compress the prompt down to be more direct, or ways to make the prompt\nmore explicit to reduce the chances of deviation.\n\n## The process\n\nThere are only two steps you are allowed to do to complete your task:\n\nFirst, use the test-case-runner:openai function to read the thread file specified by threadPath. Think carefully about the information you receive\nback.\n\nSecond, analyze the information and then call the test-case-runner:assessment function with your assessment. Be brief - do not\nrepeat the expectation or the contents of the thread file verbatim - summarize.", + "role": "system", + "name": "agents_test-assessor_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "test-case-runner_openai", + "description": "recreates the exact request that was sent to openai for the last request, as well as the response back from openai. Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.", + "parameters": { + "type": "object", + "properties": { + "threadPath": { + "type": "string", + "description": "relative path to the thread to recreate the request response pair from" + } + }, + "required": [ + "threadPath" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "description": "The assessment outcome of a single test iteration, including reasoning, analysis, and improvements", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the chain of thought reasoning for how the outcome was decided upon" + }, + "outcome": { + "type": "boolean", + "description": "the outcome of the test iteration, true if the expectation was met, false if it was not" + }, + "analysis": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the step by step analysis 
of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have" + }, + "improvements": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome" + } + }, + "required": [ + "reasoning", + "outcome" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/test-assessor.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_KxjltrPqXKyk1fcK1OhhCMxi", + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "arguments": "{\"reasoning\":[\"The local function was indeed called as indicated by the tool call in the response, which successfully executed and returned 'Local reply.'\",\"There were no errors or refusals, meaning the assistant functioned as intended in executing the call,\"],\"outcome\":true,\"analysis\":[],\"improvements\":[]}" + } + } + ] + }, + "stats": { + "created": 1729384566, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 1033, + "openAiProcessingMs": 822, + "usage": { + "completion_tokens": 76, + "prompt_tokens": 964, + "total_tokens": 1040, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV/case_1/iteration_0": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": 
"{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_TrqXpjurgn77TXzAtjLUw3lF" + }, + { + "role": "assistant", + "content": "Local reply.", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/agent-fixture.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_bmk6SePr9Zb7gPQDboQKMt2I", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + "stats": { + "created": 1729384569, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 479, + "openAiProcessingMs": 276, + "usage": { + "completion_tokens": 12, + "prompt_tokens": 142, + "total_tokens": 154, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "auto", + "parallel_tool_calls": true, + "messages": [ + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": 
"agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_TrqXpjurgn77TXzAtjLUw3lF", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_TrqXpjurgn77TXzAtjLUw3lF" + }, + { + "role": "assistant", + "content": "Local reply.", + "name": "agents_agent-fixture_md", + "refusal": null + }, + { + "content": "call the local function", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_agent-fixture_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_bmk6SePr9Zb7gPQDboQKMt2I", + "type": "function", + "function": { + "name": "io-fixture_local", + "arguments": "{}" + } + } + ] + }, + { + "role": "tool", + "content": "local reply", + "tool_call_id": "call_bmk6SePr9Zb7gPQDboQKMt2I" + }, + { + "content": "Your name is 'Assistant'\n\nALWAYS be as brief as possible", + "role": "system", + "name": "agents_agent-fixture_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "io-fixture_local", + "description": "ping locally", + "parameters": { + "type": "object", + "properties": {}, + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "io-fixture_error", + "description": "throw an error", + "parameters": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + }, + "required": [ + "message" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": "Local reply.", + "name": "agents/agent-fixture.md", + "refusal": null + }, + "stats": { + "created": 1729384570, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 391, + "openAiProcessingMs": 185, + "usage": { + "completion_tokens": 4, + "prompt_tokens": 172, + "total_tokens": 
176, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ], + "rep_E8R/dreamcatcher-tech/HAL:main/act_H2R/bac_VWV/case_1/iteration_0/assess_0": [ + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "threadPath: threads/bac_VWVY702R0N37T6HC/case_1/iteration_0.json\n\nExpectation: function was called twice", + "role": "user", + "name": "iteration_0" + }, + { + "content": "You are an expert assessor of test results.\n\nAI agents will have been run previously under test conditions. Your job is to\nassess the resulting system state against the expectation, analyze why the\nsystem prompt used for the AI agent did not perform as well as it could have,\nand list improvements that could be made to the system prompt.\n\nYou will be given two inputs:\n\n1. \"threadPath\" which is a parameter to call the openai function which will read\n in the system state for assessment against the expectation\n2. An \"Expectation\" to assess the system state against\n\nThe result from the openai function will be a json object with two keys: request\nand response. The request is the api call that was sent to the openai chat\ncompletion endpoint, and the response is what was received back from that api\ncall. Included in this api call will be the system prompt message.\n\nWhen considering the system prompt, look for factual inconsistencies,\nconflicting instructions, overly verbose or redundant sections. Also look for\nways to compress the prompt down to be more direct, or ways to make the prompt\nmore explicit to reduce the chances of deviation.\n\n## The process\n\nThere are only two steps you are allowed to do to complete your task:\n\nFirst, use the test-case-runner:openai function to read the thread file specified by threadPath. 
Think carefully about the information you receive\nback.\n\nSecond, analyze the information and then call the test-case-runner:assessment function with your assessment. Be brief - do not\nrepeat the expectation or the contents of the thread file verbatim - summarize.", + "role": "system", + "name": "agents_test-assessor_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "test-case-runner_openai", + "description": "recreates the exact request that was sent to openai for the last request, as well as the response back from openai. Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.", + "parameters": { + "type": "object", + "properties": { + "threadPath": { + "type": "string", + "description": "relative path to the thread to recreate the request response pair from" + } + }, + "required": [ + "threadPath" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "description": "The assessment outcome of a single test iteration, including reasoning, analysis, and improvements", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the chain of thought reasoning for how the outcome was decided upon" + }, + "outcome": { + "type": "boolean", + "description": "the outcome of the test iteration, true if the expectation was met, false if it was not" + }, + "analysis": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the step by step analysis of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have" + }, + "improvements": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome" + } + }, + 
"required": [ + "reasoning", + "outcome" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/test-assessor.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_RNUwMprBlEPjxJ1BbrSMYv8J", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json\"}" + } + } + ] + }, + "stats": { + "created": 1729384572, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 759, + "openAiProcessingMs": 557, + "usage": { + "completion_tokens": 41, + "prompt_tokens": 618, + "total_tokens": 659, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "threadPath: threads/bac_VWVY702R0N37T6HC/case_1/iteration_0.json\n\nExpectation: function was called twice", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_test-assessor_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_RNUwMprBlEPjxJ1BbrSMYv8J", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json.\"}", + "tool_call_id": "call_RNUwMprBlEPjxJ1BbrSMYv8J" + }, + { + "content": "You are an expert assessor of test results.\n\nAI agents will have been run previously under test conditions. 
Your job is to\nassess the resulting system state against the expectation, analyze why the\nsystem prompt used for the AI agent did not perform as well as it could have,\nand list improvements that could be made to the system prompt.\n\nYou will be given two inputs:\n\n1. \"threadPath\" which is a parameter to call the openai function which will read\n in the system state for assessment against the expectation\n2. An \"Expectation\" to assess the system state against\n\nThe result from the openai function will be a json object with two keys: request\nand response. The request is the api call that was sent to the openai chat\ncompletion endpoint, and the response is what was received back from that api\ncall. Included in this api call will be the system prompt message.\n\nWhen considering the system prompt, look for factual inconsistencies,\nconflicting instructions, overly verbose or redundant sections. Also look for\nways to compress the prompt down to be more direct, or ways to make the prompt\nmore explicit to reduce the chances of deviation.\n\n## The process\n\nThere are only two steps you are allowed to do to complete your task:\n\nFirst, use the test-case-runner:openai function to read the thread file specified by threadPath. Think carefully about the information you receive\nback.\n\nSecond, analyze the information and then call the test-case-runner:assessment function with your assessment. Be brief - do not\nrepeat the expectation or the contents of the thread file verbatim - summarize.", + "role": "system", + "name": "agents_test-assessor_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "test-case-runner_openai", + "description": "recreates the exact request that was sent to openai for the last request, as well as the response back from openai. 
Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.", + "parameters": { + "type": "object", + "properties": { + "threadPath": { + "type": "string", + "description": "relative path to the thread to recreate the request response pair from" + } + }, + "required": [ + "threadPath" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "description": "The assessment outcome of a single test iteration, including reasoning, analysis, and improvements", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the chain of thought reasoning for how the outcome was decided upon" + }, + "outcome": { + "type": "boolean", + "description": "the outcome of the test iteration, true if the expectation was met, false if it was not" + }, + "analysis": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the step by step analysis of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have" + }, + "improvements": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome" + } + }, + "required": [ + "reasoning", + "outcome" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/test-assessor.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_u9ztosQXfAk2PtX0U5Qm90N0", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VWVY702R0N37T6HC/case_1/iteration_0.json\"}" + } + } + ] + }, + "stats": { + "created": 1729384574, + "model": "gpt-4o-mini-2024-07-18", + 
"system_fingerprint": "fp_482c22a7bc", + "duration": 832, + "openAiProcessingMs": 619, + "usage": { + "completion_tokens": 41, + "prompt_tokens": 749, + "total_tokens": 790, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + }, + { + "request": { + "model": "gpt-4o-mini", + "tool_choice": "required", + "parallel_tool_calls": false, + "messages": [ + { + "content": "threadPath: threads/bac_VWVY702R0N37T6HC/case_1/iteration_0.json\n\nExpectation: function was called twice", + "role": "user", + "name": "iteration_0" + }, + { + "role": "assistant", + "content": null, + "name": "agents_test-assessor_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_RNUwMprBlEPjxJ1BbrSMYv8J", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"code\":\"NotFoundError\",\"data\":{\"what\":\"threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json\"},\"caller\":\"\",\"name\":\"Error\",\"message\":\"Could not find threads/bac_VVWY702R0N37T6HC/case_1/iteration_0.json.\"}", + "tool_call_id": "call_RNUwMprBlEPjxJ1BbrSMYv8J" + }, + { + "role": "assistant", + "content": null, + "name": "agents_test-assessor_md", + "refusal": null, + "tool_calls": [ + { + "id": "call_u9ztosQXfAk2PtX0U5Qm90N0", + "type": "function", + "function": { + "name": "test-case-runner_openai", + "arguments": "{\"threadPath\":\"threads/bac_VWVY702R0N37T6HC/case_1/iteration_0.json\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\"request\":{\"model\":\"gpt-4o-mini\",\"messages\":[{\"content\":\"call the local 
function\",\"role\":\"user\",\"name\":\"iteration_0\"},{\"role\":\"assistant\",\"content\":null,\"name\":\"agents_agent-fixture_md\",\"refusal\":null,\"tool_calls\":[{\"id\":\"call_TrqXpjurgn77TXzAtjLUw3lF\",\"type\":\"function\",\"function\":{\"name\":\"io-fixture_local\",\"arguments\":\"{}\"}}]},{\"role\":\"tool\",\"content\":\"local reply\",\"tool_call_id\":\"call_TrqXpjurgn77TXzAtjLUw3lF\"},{\"role\":\"assistant\",\"content\":\"Local reply.\",\"name\":\"agents_agent-fixture_md\",\"refusal\":null},{\"content\":\"call the local function\",\"role\":\"user\",\"name\":\"iteration_0\"},{\"role\":\"assistant\",\"content\":null,\"name\":\"agents_agent-fixture_md\",\"refusal\":null,\"tool_calls\":[{\"id\":\"call_bmk6SePr9Zb7gPQDboQKMt2I\",\"type\":\"function\",\"function\":{\"name\":\"io-fixture_local\",\"arguments\":\"{}\"}}]},{\"role\":\"tool\",\"content\":\"local reply\",\"tool_call_id\":\"call_bmk6SePr9Zb7gPQDboQKMt2I\"},{\"role\":\"system\",\"content\":\"Your name is 'Assistant'\\n\\nALWAYS be as brief as possible\",\"name\":\"agents_agent-fixture_md\"}],\"seed\":1337,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"io-fixture_local\",\"description\":\"ping locally\",\"strict\":false,\"parameters\":{\"type\":\"object\",\"properties\":{},\"additionalProperties\":false}}},{\"type\":\"function\",\"function\":{\"name\":\"io-fixture_error\",\"description\":\"throw an error\",\"strict\":false,\"parameters\":{\"type\":\"object\",\"properties\":{\"message\":{\"type\":\"string\"}},\"required\":[\"message\"],\"additionalProperties\":false}}}],\"tool_choice\":\"auto\",\"parallel_tool_calls\":true},\"response\":{\"role\":\"assistant\",\"content\":\"Local reply.\",\"name\":\"agents/agent-fixture.md\",\"refusal\":null}}", + "tool_call_id": "call_u9ztosQXfAk2PtX0U5Qm90N0" + }, + { + "content": "You are an expert assessor of test results.\n\nAI agents will have been run previously under test conditions. 
Your job is to\nassess the resulting system state against the expectation, analyze why the\nsystem prompt used for the AI agent did not perform as well as it could have,\nand list improvements that could be made to the system prompt.\n\nYou will be given two inputs:\n\n1. \"threadPath\" which is a parameter to call the openai function which will read\n in the system state for assessment against the expectation\n2. An \"Expectation\" to assess the system state against\n\nThe result from the openai function will be a json object with two keys: request\nand response. The request is the api call that was sent to the openai chat\ncompletion endpoint, and the response is what was received back from that api\ncall. Included in this api call will be the system prompt message.\n\nWhen considering the system prompt, look for factual inconsistencies,\nconflicting instructions, overly verbose or redundant sections. Also look for\nways to compress the prompt down to be more direct, or ways to make the prompt\nmore explicit to reduce the chances of deviation.\n\n## The process\n\nThere are only two steps you are allowed to do to complete your task:\n\nFirst, use the test-case-runner:openai function to read the thread file specified by threadPath. Think carefully about the information you receive\nback.\n\nSecond, analyze the information and then call the test-case-runner:assessment function with your assessment. Be brief - do not\nrepeat the expectation or the contents of the thread file verbatim - summarize.", + "role": "system", + "name": "agents_test-assessor_md" + } + ], + "seed": 1337, + "tools": [ + { + "type": "function", + "function": { + "name": "test-case-runner_openai", + "description": "recreates the exact request that was sent to openai for the last request, as well as the response back from openai. 
Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.", + "parameters": { + "type": "object", + "properties": { + "threadPath": { + "type": "string", + "description": "relative path to the thread to recreate the request response pair from" + } + }, + "required": [ + "threadPath" + ], + "additionalProperties": false + }, + "strict": false + } + }, + { + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "description": "The assessment outcome of a single test iteration, including reasoning, analysis, and improvements", + "parameters": { + "type": "object", + "properties": { + "reasoning": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the chain of thought reasoning for how the outcome was decided upon" + }, + "outcome": { + "type": "boolean", + "description": "the outcome of the test iteration, true if the expectation was met, false if it was not" + }, + "analysis": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the step by step analysis of WHY the system prompt of the target agent under test did NOT perform as well in the outcome as it could have" + }, + "improvements": { + "type": "array", + "items": { + "type": "string" + }, + "description": "the improvement(s) to the agent prompt that would have resulted in better performance to reach the outcome" + } + }, + "required": [ + "reasoning", + "outcome" + ], + "additionalProperties": false + }, + "strict": false + } + } + ] + }, + "reply": { + "assistant": { + "role": "assistant", + "content": null, + "name": "agents/test-assessor.md", + "refusal": null, + "tool_calls": [ + { + "id": "call_93wgP7ueuMuCSc90esYfpqup", + "type": "function", + "function": { + "name": "test-case-runner_assessment", + "arguments": "{\"reasoning\":[\"The function was called twice as per the expectation.\",\"The response from the system indicates that it successfully executed the local function on both 
calls.\"],\"outcome\":true}" + } + } + ] + }, + "stats": { + "created": 1729384575, + "model": "gpt-4o-mini-2024-07-18", + "system_fingerprint": "fp_482c22a7bc", + "duration": 875, + "openAiProcessingMs": 654, + "usage": { + "completion_tokens": 50, + "prompt_tokens": 1236, + "total_tokens": 1286, + "completion_tokens_details": { + "reasoning_tokens": 0 + }, + "prompt_tokens_details": { + "cached_tokens": 0 + } + } + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/_import-artifact/isolates/actor.ts b/_import-artifact/isolates/actor.ts new file mode 100644 index 000000000000..868b635d97d4 --- /dev/null +++ b/_import-artifact/isolates/actor.ts @@ -0,0 +1,164 @@ +import { z } from 'zod' +import * as backchat from './backchat.ts' +import * as longthread from './longthread.ts' +import * as system from './system.ts' +import { + addBranches, + Functions, + getParent, + type IA, + isActorBranch, + machineIdRegex, + pidSchema, + print, + RpcOpts, +} from '@/constants.ts' +import * as actor from '@/api/isolates/actor.ts' +import { assert, Debug, equal } from '@utils' + +const log = Debug('AI:actor') + +export const parameters = actor.parameters +export const returns = actor.returns + +export const functions: Functions = { + async backchat({ backchatId, machineId }, api) { + assert(isActorBranch(api.pid), 'Not actor branch: ' + print(api.pid)) + + log('backchat', print(api.pid)) + const isNewActor = !!machineId + if (isNewActor) { + await api.updateState(() => { + return { machines: { [machineId]: { access: [] } }, repos: {} } + }, stateSchema) + } + const opts: RpcOpts = { + noClose: true, + branchName: backchatId, + } + + const { mergeInternal } = await api.actions('system') + const from = getParent(api.pid) + const { elapsed } = await mergeInternal({ from, to: api.pid }) + log('mergeInternal lasted', elapsed) + + const { newThread } = await api.actions('backchat', opts) + // TODO set permissions on .io.json + await newThread({}) + // TODO optionally start 
a default thread + const pid = addBranches(api.pid, backchatId) + log('backchat pid', print(pid)) + return pid + }, + async thread({ threadId }, api) { + const opts = { branchName: threadId, noClose: true } + const actions = await api.actions('longthread', opts) + await actions.start({}) + return addBranches(api.pid, threadId) + }, + rm: async ({ repo, all = false }, api) => { + assert(isActorBranch(api.pid), 'rm not actor: ' + print(api.pid)) + log('rm', repo, all) + + const { rm } = await api.actions('system') + const state = await api.state(stateSchema, bare) + if (all) { + const promises = [] + for (const repo in state.repos) { + promises.push(rm({ pid: state.repos[repo] })) + } + await Promise.all(promises) + await api.updateState( + ({ machines, repos }) => { + assert(equal(state.repos, repos), 'repos changed') + return { machines, repos: {} } + }, + stateSchema, + ) + return { reposDeleted: promises.length } + } + assert(repo, 'must specify repo or all') + if (!(repo in state.repos)) { + return { reposDeleted: 0 } + } + const pid = state.repos[repo] + log('rm', repo, print(pid), all) + + await rm({ pid }) + await api.updateState(({ machines, repos }) => { + assert(equal(state.repos, repos), 'repos changed') + delete repos[repo] + return { machines, repos } + }, stateSchema) + return { reposDeleted: 1 } + }, + lsRepos: async (_, api) => { + assert(isActorBranch(api.pid), 'lsRepos not actor: ' + print(api.pid)) + const { repos } = await api.state(stateSchema, bare) + return Object.keys(repos) + }, + clone: async ({ repo, isolate, params }, api) => { + assert(isActorBranch(api.pid), 'clone not actor: ' + print(api.pid)) + log('clone', repo, isolate, params) + + await assertNoRepo(api, repo) + + const { clone } = await api.actions('system') + const result = await clone({ repo, isolate, params }) + log('clone result', print(result.pid)) + + log('state', await api.state(stateSchema, bare)) + + await api.updateState(({ machines, repos }) => { + assert(!repos[repo], 
'repos changed: ' + repo) + repos[repo] = result.pid + return { machines, repos } + }, stateSchema) + log('state', await api.state(stateSchema, bare)) + return result + }, + init: async ({ repo, isolate, params }, api) => { + assert(isActorBranch(api.pid), 'init not actor: ' + print(api.pid)) + log('init', repo, isolate, params) + + await assertNoRepo(api, repo) + + const { init } = await api.actions('system') + const { pid, head } = await init({ repo, isolate, params }) + + await api.updateState(({ machines, repos }) => { + assert(!repos[repo], 'repos changed: ' + repo) + repos[repo] = pid + return { machines, repos } + }, stateSchema) + return { pid, head } + }, + pull: async ({ repo, target }, api) => { + assert(isActorBranch(api.pid), 'pull not actor: ' + print(api.pid)) + log('pull', repo, print(target)) + target = target || api.pid + + const { pull } = await api.actions('system') + const { head, elapsed } = await pull({ repo, target }) + return { head, elapsed, pid: target } + }, +} +const assertNoRepo = async (api: IA, repo: string) => { + const state = await api.state(stateSchema, bare) + if (repo in state.repos) { + throw new Error('Repo already exists: ' + repo) + } +} + +const stateSchema = z.object({ + repos: z.record(pidSchema), + machines: z.record( + z.string().regex(machineIdRegex), + z.object({ + /** Log when a machine does something */ + access: z.array(z.string()), + }), + ), +}) + +const bare = { repos: {}, machines: {} } diff --git a/_import-artifact/isolates/actors.ts b/_import-artifact/isolates/actors.ts new file mode 100644 index 000000000000..f08739c4f5d8 --- /dev/null +++ b/_import-artifact/isolates/actors.ts @@ -0,0 +1,161 @@ +import { + actorIdRegex, + addBranches, + backchatIdRegex, + colorize, + Functions, + getActorId, + isBaseRepo, + machineIdRegex, + pidSchema, + print, + RpcOpts, + SU_ACTOR, + SU_BACKCHAT, + ToApiType, +} from '@/constants.ts' +import * as actor from '@/api/isolates/actor.ts' +import { assert, Debug, expect } from 
'@utils' +import * as session from './session.ts' +import * as machines from './machines.ts' +import { Crypto } from '../api/crypto.ts' +import { z } from 'zod' + +const log = Debug('AI:actors') + +export const parameters = { + '@@install': z.object({ + /** The superuser machine id */ + superuser: z.string().regex(machineIdRegex), + }), + createActor: z.object({ + actorId: z.string().regex(actorIdRegex), + machineId: z.string().regex(machineIdRegex), + backchatId: z.string().regex(backchatIdRegex), + }).describe( + 'Create a new actor branch and a new backchat branch, as well as writing the machineId to the "machines" branch and pointing it to the actor branch', + ), + /** + * Called by an actor, after authorizing, to merge its actorId with the + * actorId authorized with the given auth provider. + * + * For example, in github, the github user id is used to link actorIds + * together, and the first actorId to pass auth is the stable actorId for that + * user id, so future requests to merge always merge into that actorId. + * + * The operation leaves a record of what auth provider approved what + * unification and at what commit. + */ + surrender: z.object({ + authProvider: pidSchema, + }), + /** + * Register an auth provider that is allowed to authorize merging actorIds. + * Can only be called by the installation owner account. 
+ */ + addAuthProvider: z.object({ + provider: pidSchema, + name: z.string(), + }), +} +export const returns = { + '@@install': z.void(), + createActor: pidSchema, + surrender: z.string(), + addAuthProvider: z.void(), +} +export type Api = ToApiType + +export const functions: Functions = { + async '@@install'({ superuser }, api) { + // TODO set ACL on io.json to only run this isolate + assert(Crypto.assert(superuser), 'invalid superuser: ' + superuser) + assert(isBaseRepo(api.pid), '@@install not base: ' + print(api.pid)) + log('@@install', print(api.pid)) + + const children = await api.lsChildren() + expect(children, 'repo must have no child branches').toEqual([]) + + // TODO make a permissions branch based on the repoid, so it is unique + await api.updateState((state) => { + expect(state, 'state must be empty').toEqual({}) + return { superuser, authProviders: {} } + }, stateSchema) + + // TODO send an api action that wipes the dir and writes a basic commit + const { create } = await api.functions('session') + const { createActor } = await api.functions('actors') + + log('creating machine branch') + await create({ name: 'machines' }) + + log('creating superuser actor') + const pid = await createActor({ + machineId: superuser, + actorId: SU_ACTOR, + backchatId: SU_BACKCHAT, + }) + log('@@install complete', print(pid)) + }, + async createActor({ machineId, actorId, backchatId }, api) { + assert(isBaseRepo(api.pid), 'createActor not base: ' + print(api.pid)) + log('createActor', colorize(machineId), colorize(actorId)) + + const opts: RpcOpts = { + noClose: true, + branchName: actorId, + } + const { backchat } = await api.actions('actor', opts) + const pid = await backchat({ backchatId, machineId }) + + const target = addBranches(api.pid, 'machines') + const { upsert } = await api.actions('machines', { target }) + log('adding machines branch') + await upsert({ machineId, actorId }) + log('machines branch updated') + // TODO parallelize + return pid + }, + + async 
addAuthProvider({ provider, name }, api) { + assert(isBaseRepo(provider), 'addAuthProvider not base: ' + print(provider)) + log('addAuthProvider provider', print(provider)) + log('addAuthProvider in', print(api.pid)) + await api.updateState((state) => { + const next = stateSchema.parse({ ...state }) + if (next.authProviders[name]) { + throw new Error('Auth provider already exists: ' + name) + } + next.authProviders[name] = provider + return next + }, stateSchema) + }, + + surrender: async ({ authProvider }, api) => { + assert(isBaseRepo(api.pid), 'surrender not base: ' + print(api.pid)) + log('surrender', print(api.pid)) + log('surrender authProvider', print(authProvider)) + log('origin', print(api.origin.source)) + + const actorId = getActorId(api.origin.source) + + // look up the pointer from the auth provider + const target = addBranches(authProvider, actorId) + + // TODO use state to replace pointer + + // TODO make this an api function exposed by the auth provider isolate + const pointer = await api.readJSON('pointer.json', { target }) + log('authPointer', pointer) + + return actorId + + // TODO move every machine in this actor to merge with the baseActorId + }, +} + +export const stateSchema = z.object({ + /** The machineId of the superuser */ + superuser: z.string().regex(machineIdRegex), + authProviders: z.record(pidSchema), +}) diff --git a/_import-artifact/isolates/agents.ts b/_import-artifact/isolates/agents.ts new file mode 100644 index 000000000000..20543d947d30 --- /dev/null +++ b/_import-artifact/isolates/agents.ts @@ -0,0 +1,158 @@ +import { + agentConfigSchema, + agentSchema, + Functions, + getThreadPath, + print, + reasoning, + ToApiType, +} from '@/constants.ts' +import { Debug } from '@utils' +import { z } from 'zod' +import * as files from './files.ts' +import { loadAgent, loadString } from '@/isolates/utils/load-agent.ts' +const log = Debug('AI:agents') + +export const parameters = { + search: z.object({ reasoning, query: z.string() 
}).describe( + 'The highly descriptive natural language search query saying what the agent you want should be capable of doing. Will return a ranked array of results, where each result will have a path to the agent file, the name of the agent, and a reason why it was selected, and optionally an avatar representing the agent.', + ), + switch: z.object({ + reasoning, + path: z.string().describe('The path to the agent file to switch to'), + rewrittenPrompt: z.string().optional().describe( + 'The rewritten prompt to use, if rewriting the prompt would be beneficial to the clarity of the conversation', + ), + swallowPrompt: z.boolean().optional().describe( + 'Whether to call the agent after switching, or to swallow the prompt that caused the switch and await the next prompt. Use this if there is no meaningful message to send to the agent yet, for example if the user simply asked to switch to the agent - there would be no point executing the agent just with this method.', + ), + alsoConsidered: z.array( + z.object({ + path: z.string().describe('path to the agent file that was considered'), + reason: z.string().describe( + 'reason why this agent was considered but rejected', + ), + }), + ).max(3).optional(), + }).describe( + 'Called with step by step reasoning how the selected path was decided upon, and the path to the new agent to call', + ), + write: files.parameters.write.describe( + 'Write an agent file with the provided contents. The contents will be checked for formatting and errors before being written.', + ), + update: files.parameters.update.describe( + 'Update an agent file using a regex and a replacement string. The number of occurrences replaced will be returned to you as an integer. If you want to append something to a file, you can use a regex to match the end of the file and replace it with the contents you want to append. To delete portions of a file, you can use a regex to match the contents you want to delete and replace it with an empty string. 
If the contents are not formatted correctly as an agent, an error will be thrown.', + ), + read: files.parameters.read.describe( + 'Read an agent file. The contents will be returned as a string. If the contents are not formatted correctly as an agent, an error will be thrown.', + ), + config: z.object({ + reasoning, + path: z.string().describe('path to the agent file you want to update'), + description: z.string().optional().describe('description of the agent'), + config: agentConfigSchema.optional().describe( + 'configuration of the agent', + ), + runner: agentSchema.shape.runner.optional().describe('runner of the agent'), + commands: agentSchema.shape.commands.optional().describe( + 'commands of the agent', + ), + }).describe( + 'Update the configuration of the agent. To remove an item, set it to "undefined"', + ), +} +export const returns = { + search: z.array( + z.object({ + path: z.string(), + name: z.string(), + reason: z.string(), + imageUrl: z.string().optional(), + }), + ), + switch: z.null(), + write: files.returns.write, + update: files.returns.update, + read: files.returns.read, + config: z.void(), +} + +export type Api = ToApiType + +export const functions: Functions = { + search: async ({ query }, api) => { + // TODO make this the same as the files search function + + // read all the agent files in from disk + // make an AI call to select the best ones and rank them + // also to provide a reason why they were selected + // if nothing found or an error, return null + + // this can be an AI call where we force the AI to call the function with + // something + // this is just using AI to help do coding tasks, as opposed to agents + // so agents are defined by their interaction aspect + // interaction implies state + log('search', query, print(api.pid)) + const cheapResults = await api.ls('agents') + return cheapResults.map((path) => ({ + path: `agents/${path}`, + name: path, + reason: 'no reason available', + })) + }, + switch: async ({ path, 
rewrittenPrompt, swallowPrompt }, api) => { + log('switch', path, rewrittenPrompt, swallowPrompt, print(api.pid)) + await loadAgent(path, api) + + const thread = await api.readThread() + thread.agent = path + api.writeJSON(getThreadPath(api.pid), thread) + + return null + }, + write: async ({ path, content = '' }, api) => { + log('write', path, content) + await loadString(path, content, api) + return files.functions.write({ reasoning: [], path, content }, api) + }, + update: async ({ path, regex, replacement }, api) => { + log('update', path, regex, replacement) + const { result: content, matches } = files.replace(path, regex, replacement) + await loadString(path, content, api) + await files.functions.write({ reasoning: [], path, content }, api) + return { matchesUpdated: matches.length } + }, + read: async ({ reasoning, path }, api) => { + log('read', path, print(api.pid)) + const contents = await files.functions.read({ reasoning, path }, api) + await loadString(path, contents, api) + return contents + }, + config: async ({ path, description, config, runner, commands }, api) => { + // TODO handle deletion of sections + log('config', path, description, config, runner, commands) + const contents = await files.functions.read({ reasoning: [], path }, api) + const agent = await loadString(path, contents, api) + if (description !== undefined) { + agent.description = description + } + if (config !== undefined) { + agent.config = config + } + if (runner !== undefined) { + agent.runner = runner + } + if (commands !== undefined) { + agent.commands = commands + } + + // TODO handle the writing of the file with the instructions + + await files.functions.write({ + reasoning: [], + path, + content: agent.toString(), + }, api) + }, +} diff --git a/_import-artifact/isolates/ai-completions.test.ts b/_import-artifact/isolates/ai-completions.test.ts new file mode 100644 index 000000000000..1481e7d0948c --- /dev/null +++ b/_import-artifact/isolates/ai-completions.test.ts @@ -0,0 
+1,49 @@ +import { loadAgent } from '@/isolates/utils/load-agent.ts' +import { complete, image } from '@/isolates/ai-completions.ts' +import { functions, safeAssistantName } from '@/isolates/ai-completions.ts' +import { expect } from '@utils' +import { createMockApi } from '@/tests/fixtures/mock-api.ts' + +Deno.test('test the regex for agent name sanitization', () => { + const result = safeAssistantName({ + role: 'assistant', + name: 'agents/o1.md', + }) + expect(result).toEqual({ role: 'assistant', name: 'agents_o1_md' }) +}) +Deno.test('generate images', async (t) => { + image.mock.useRecorder(t) + const { api, stop } = await createMockApi('test/images') + const path = 'images/test.jpg' + const result = await functions.image({ + path, + prompt: 'a dystopian robot overlord mecha', + lowQuality: true, + size: '1024x1024', + style: 'natural', + }, api) + const file = await api.readBinary(path) + expect(file.byteLength).toBeGreaterThan(0) + expect(file.byteLength).toEqual(result.size) + + image.mock.teardown() + stop() +}) + +Deno.test('inject empty', async (t) => { + complete.mock.useRecorder(t) + const { api, stop } = await createMockApi('test/inject-single') + const path = 'fake/agent.md' + api.write(path, '') + const agent = await loadAgent(path, api) + + const result = await complete(agent, [{ + role: 'system', + content: 'say cheese in emoji', + }], api) + expect(result).toHaveProperty('assistant') + expect(result).toHaveProperty('stats') + + complete.mock.teardown() + stop() +}) diff --git a/_import-artifact/isolates/ai-completions.ts b/_import-artifact/isolates/ai-completions.ts new file mode 100644 index 000000000000..60391275bee3 --- /dev/null +++ b/_import-artifact/isolates/ai-completions.ts @@ -0,0 +1,321 @@ +import { mockCreator } from './utils/mocker.ts' +import { assert, Debug } from '@utils' +import { base64 } from '@scure/base' +import '@std/dotenv/load' // load .env variables +import OpenAI from 'openai' +import { decode, Image } from 'imagescript' 
+import { + Agent, + AssistantMessage, + ChatParams, + chatParams, + CompletionMessage, + Functions, + getThreadPath, + type IA, + messageStatsSchema, + print, + printPlain, + Returns, + Thread, + ToApiType, +} from '@/constants.ts' +import { loadTools } from './utils/ai-load-tools.ts' +import { loadAgent } from './utils/load-agent.ts' +import { z } from 'zod' +import { assistantMessage } from '@/api/zod.ts' +import { expect } from '@std/expect/expect' + +const log = Debug('AI:completions') + +const apiKey = Deno.env.get('OPENAI_API_KEY') +if (!apiKey) { + throw new Error('missing openai api key: OPENAI_API_KEY') +} +const ai = new OpenAI({ apiKey, maxRetries: 5 }) + +export const transcribe = async (file: File) => { + const transcription = await ai.audio.transcriptions + .create({ + file, + model: 'whisper-1', + prompt: + 'Backchat, GPT4, GPT3, Dreamcatcher, CRM, HAL, Deno, Stucks, Redlid, Pijul', + }) + return transcription.text +} + +export const parameters = { + /** Complete the thread with the given agent */ + complete: z.object({ + path: z.string(), + }), + /** Gives slightly quicker feedback to users when waiting for completions */ + completionEffect: z.object({ + path: z.string(), + }), + image: z.object({ + path: z.string().regex(/\.jpg$/, { + message: 'The path must end with .jpg', + }).describe( + 'The path to the image to generate, which should be a meaningful name in the images/ directory, and should end in .jpg', + ), + prompt: z.string().max(4000), + lowQuality: z.boolean().optional().describe( + 'Generate a low quality image as opposed to the default high quality image', + ), + size: z.enum(['1024x1024', '1792x1024', '1024x1792']).optional().describe( + 'The size of the image to generate', + ), + style: z.enum(['vivid', 'natural']).optional().describe( + 'The style of the image to generate, which defaults to vivid', + ), + }).describe( + 'Generate an image using DALL-E-3 from the provided prompt. The image will be saved to the provided path. 
The revised prompt that the image generator used will be returned, as well as the size of the image in bytes.', + // TODO add how to show in stateboard + ), +} +export const returns: Returns = { + complete: z.void(), + completionEffect: z.void(), + image: z.object({ + /** The revised prompt that the image generation system used */ + revisedPrompt: z.string(), + /** The size in bytes of the written image */ + size: z.number().int().gte(0), + }), +} + +export type Api = ToApiType + +export const functions: Functions = { + async complete({ path }, api) { + const threadPath = getThreadPath(api.pid) + log('completing thread %o', threadPath, print(api.pid)) + + const agent = await loadAgent(path, api) + const thread = await api.readThread(threadPath) + thread.messages.push({ role: 'assistant', name: agent.source.path }) + api.writeJSON(threadPath, thread) + + const { completionEffect } = await api.actions('ai-completions') + await completionEffect({ path }) + }, + async completionEffect({ path }, api) { + const threadPath = getThreadPath(api.pid) + log('completing thread %o', threadPath, print(api.pid)) + + const agent = await loadAgent(path, api) + + const thread = await api.readThread(threadPath) + const last = thread.messages.pop() + assert(last, 'no messages in thread') + assert(last.role === 'assistant', 'last message must be assistant') + assert(last.name === agent.source.path, 'last message must be from agent') + assert(!last.content, 'last message must be empty') + + const { assistant, stats } = await complete(agent, thread.messages, api) + + const id = thread.messages.length + thread.messageOffset + thread.messageStats[id] = stats + thread.messages.push(assistant) + api.writeJSON(threadPath, thread) + log('completion complete', assistant.tool_calls?.[0], assistant.content) + }, + async image(params, api) { + const { path } = params + const { data, response } = await image(params, api) + log('headers', response.statusText) + const { b64_json, revised_prompt } = 
data.data[0] + if (!b64_json) { + throw new Error('no image data') + } + const imageData = base64.decode(b64_json) + log('image length', imageData.length) + + const png = await decode(imageData) + assert(png instanceof Image, 'image must be an instance of Image') + const jpg = await png.encodeJPEG() + log('length', jpg.length) + + api.write(path, jpg) + return { revisedPrompt: revised_prompt, size: jpg.length } + }, +} + +export const image = async ( + params: z.infer, + api: IA, +) => { + const { prompt, lowQuality, size, style } = params + const id = printPlain(api.pid) + const recording = imageMock.next(id) + if (recording) { + expect(recording.request).toEqual(params) + return recording.reply as { + data: OpenAI.Images.ImagesResponse + response: Response + } + } + + const { data, response } = await ai.images + .generate({ + prompt, + model: 'dall-e-3', + quality: lowQuality ? 'standard' : 'hd', + response_format: 'b64_json', + size, + style, + }).withResponse() + + const squeezed = squeezeMockData(data) + imageMock.store(id, { request: params, reply: { data: squeezed, response } }) + return { data, response } +} + +const imageReplySchema = z.object({ + data: z.unknown(), + response: z.unknown(), +}) +const imagePairSchema = z.object({ + request: parameters['image'], + reply: imageReplySchema, +}) + +const imageMock = mockCreator(imagePairSchema) +image.mock = imageMock + +const completionsReplySchema = z.object({ + assistant: assistantMessage, + stats: messageStatsSchema, +}) + +const completionsPairSchema = z.object({ + request: chatParams, + reply: completionsReplySchema, +}) + +export const complete = async ( + agent: Agent, + messages: Thread['messages'], + api: IA, +) => { + const tools = await loadTools(agent, api) + const args = getChatParams(agent, messages, tools) + + const id = printPlain(api.pid) + const recording = completionsMock.next(id) + if (recording) { + expect(recording.request).toEqual(args) + return recording.reply + } + + log('completion 
started with model: %o', args.model, print(api.pid)) + let retries = 0 + const RETRY_LIMIT = 5 + let errorMessage = '' + while (retries++ < RETRY_LIMIT) { + try { + const start = Date.now() + const { data: completion, response: raw } = await ai.chat.completions + .create(args).withResponse() + const duration = Date.now() - start + const openAiProcessingMs = raw.headers.get('openai-processing-ms') + const { created, model, system_fingerprint, usage } = completion + + const { message } = completion.choices[0] + log('completion complete', agent.source.path, message) + const assistant: AssistantMessage = { + ...message, + name: agent.source.path, + } + const reply = { + assistant, + stats: messageStatsSchema.parse({ + created, + model, + system_fingerprint, + usage, + duration, + openAiProcessingMs: openAiProcessingMs ? +openAiProcessingMs : 0, + }), + } + completionsMock.store(id, { request: args, reply }) + + return reply + } catch (error) { + console.error('ai completion error', error) + if (error instanceof Error) { + errorMessage = error.message + } + } + } + throw new Error(`Failed after ${retries} attempts: ${errorMessage}`) +} +const completionsMock = mockCreator(completionsPairSchema) +complete.mock = completionsMock + +export const getChatParams = ( + agent: Agent, + messages: Thread['messages'], + tools: OpenAI.ChatCompletionTool[], +) => { + const { config } = agent + const { + model, + temperature, + tool_choice, + parallel_tool_calls, + presence_penalty, + } = config + + messages = [...messages] + const sysprompt: CompletionMessage = { + role: 'system', + content: agent.instructions, + name: agent.source.path, + } + if (agent.instructions || agent.commands.length || agent.napps.length) { + messages.push(sysprompt) + } + messages = messages.map(safeAssistantName) + + let args: ChatParams = { + model, + temperature, + messages, + seed: 1337, + tools: tools.length ? tools : undefined, + tool_choice: tools.length ? 
tool_choice : undefined, + presence_penalty, + parallel_tool_calls: tools.length ? parallel_tool_calls : undefined, + } + args = Object.fromEntries( + Object.entries(args).filter(([_, value]) => value !== undefined), + ) as ChatParams + return args +} +export const safeAssistantName = (message: CompletionMessage) => { + if (message.role !== 'assistant' && message.role !== 'system') { + return message + } + if (!message.name) { + return message + } + if (!/^[a-zA-Z0-9_-]+$/.test(message.name)) { + return { ...message, name: message.name.replaceAll(/[^a-zA-Z0-9_-]/g, '_') } + } + return message +} + +const squeezeMockData = (result: OpenAI.Images.ImagesResponse) => { + const fake = + 'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAIAAAACUFjqAAABQUlEQVR4nAE2Acn+AFWFasx5BD3kQfuYK9zNJJXzR81JXGsdYwxOP/ggyAQC+dlbzrmdUZ+Xgo9sBhbNytzXE2IPfxblFuNZ9vYCD9Qw+7UUN/xCErc8I0gk21S+uCU4EX33VN1CFz5pAHaODSC/a8ZsheMYxX37BIOoMcfnpS8k6KwY7zE9VgKU4CsTmvjyE8lur1yLUAo7SKUu07rY8zYwmNASNh0CvvWpIaH1bs1V01RZE2se73fmxRfts48N4bf03otfBDHQ3j52DzyUBguaBfEcXnDOsBuN6HeYeMbrXRzVtADOYHNpl5tMMNBP2PSqx3YI7MFiBfb2wXFXAxH0JSsA1ZsAv2N5fzoy1PrKBJ8201yyqOsPZKnyqJ10ME7oAfSuLGJaACH+38rm67hvIcxsB99FWZb3Ah6CMO1CEnIBlb6tLzIdAAAAAElFTkSuQmCC' + + assert(result.data.length, 'data length must be 1') + const { b64_json: _, ...rest } = result.data[0] + return { + ...result, + data: [{ b64_json: fake, ...rest }], + } +} diff --git a/_import-artifact/isolates/artifact.ts b/_import-artifact/isolates/artifact.ts new file mode 100644 index 000000000000..d6312a0ec937 --- /dev/null +++ b/_import-artifact/isolates/artifact.ts @@ -0,0 +1,189 @@ +import { assert, Debug } from '@utils' +import Executor from '../exe/exe.ts' +import IOChannel from '../io/io-channel.ts' +import { + C, + ExeResult, + freezePid, + Functions, + IsolateLifecycle, + isPierceRequest, + isQueueBranch, + isQueueExe, + isQueuePool, + PID, + pierceSchema, + print, + QueueMessage, + ToApiType, +} from '@/constants.ts' +import IA from '../isolate-api.ts' +import { 
doAtomicBranch, doAtomicCommit, doAtomicPierce } from '@io/io.ts' +import DB from '../db.ts' +import FS from '../git/fs.ts' +import { z } from 'zod' +const log = Debug('AI:artifact') + +export const parameters = { pierce: z.object({ pierce: pierceSchema }) } +export const returns = { pierce: z.void() } + +export type Api = ToApiType + +/** + * Reason to keep artifact with an Isolate interface, is so we can control it + * from within an isolate. + */ +export const functions: Functions = { + async pierce({ pierce }, api: IA) { + assert(isPierceRequest(pierce), 'invalid pierce request') + log('pierce %o %o', pierce.isolate, pierce.functionName) + log('target', print(pierce.target)) + freezePid(pierce.target) + const { db } = sanitizeContext(api) + // TODO add ulid in here, but make it be repeatable + // TODO check signatures and permissions here + + let isPierced = false + let count = 0 + while (!isPierced && count < 100) { + count++ + const tip = await FS.openHead(pierce.target, db) + + isPierced = await doAtomicPierce(db, tip, pierce) + } + if (!isPierced) { + throw new Error(`pierce failed after ${count} attempts`) + } + }, +} + +export const lifecycles: IsolateLifecycle = { + async '@@mount'(api: IA) { + const { aesKey, seed } = api.context + assert(aesKey, 'AES_KEY not found') + const db = await DB.create(aesKey, seed) + const exe = Executor.createCacheContext() + const context: C = { db, exe } + api.context = context + assert(!api.context.aesKey, 'AES_KEY leaked') + assert(!api.context.seed, 'seed leaked') + + db.listen(async (message: QueueMessage) => { + if (isQueuePool(message)) { + const { pid } = message + logger('qpl', pid)(print(pid)) + do { + // TODO catch head not found and reject all poolables to error reply + const tip = await FS.openHead(pid, db) + await doAtomicCommit(db, tip) + } while (await db.hasPoolables(pid)) + } + if (isQueueBranch(message)) { + const { parentCommit, parentPid, sequence } = message + log('Branch: %s %s %i', print(parentPid), 
parentCommit, sequence) + const parentFs = FS.open(parentPid, parentCommit, db) + const io = await IOChannel.read(parentFs) + assert(io, 'io not found') + const branchPid = io.getBranchPid(sequence) + logger('qbr')(print(branchPid), sequence) + + let head = await db.readHead(branchPid) + // TODO if head already exists and we did not create it, error + while (!head) { + // TODO should do the atomic branch first, then check the head after + // if failed + if (await doAtomicBranch(db, parentFs, sequence)) { + return + } + head = await db.readHead(branchPid) + } + } + if (isQueueExe(message)) { + const { pid, commit, sequence } = message + logger('qex', pid)(commit, sequence) + if (await isSettled(pid, sequence, db)) { + return + } + const exeResult = await execute(pid, commit, sequence, context) + if (!exeResult) { // side effect superseded, so abort + return + } + let tip = await FS.openHead(pid, db) + while (await isExeable(sequence, tip, exeResult)) { + if (await doAtomicCommit(db, tip, exeResult)) { + return + } + tip = await FS.openHead(pid, db) + } + } + }) + }, + '@@unmount'(api: IA) { + const { db } = sanitizeContext(api) + return db.stop() + }, +} +const execute = async (pid: PID, commit: string, sequence: number, c: C) => { + const { db, exe } = c + let effectsLock: Deno.KvEntry | undefined + + const io = await IOChannel.read(FS.open(pid, commit, db)) + assert(io, 'io not found') + const request = io.getRequest(sequence) + + if (request.effect) { + const abort = new AbortController() + effectsLock = await db.watchSideEffectsLock(pid, abort) + } + const exeResult = await exe.execute(request, commit, c) + // last instance always owns the lock + exeResult.effectsLock = effectsLock + // TODO release the repo lock as part of atomic commit + return exeResult +} + +const isExeable = async (sequence: number, tip: FS, exe: ExeResult) => { + const io = await IOChannel.read(tip) + assert(io, 'io not found') + if (io.isSettled(sequence)) { + return false + } + if 
('pending' in exe) { + return !io.isPendingIncluded(sequence, exe.pending.commit) + } + return true +} +const isSettled = async (pid: PID, sequence: number, db: DB) => { + const tip = await FS.openHead(pid, db) + log('isSettled', print(tip.pid), sequence, tip.oid) + const io = await IOChannel.read(tip) + if (!io) { + return false + } + // TODO might be able to merge this with the execution retrieval ? + if (io.isSettled(sequence)) { + return true + } + return false +} +export const sanitizeContext = (api: IA): C => { + assert(api.context, 'context not found') + const { db, exe } = api.context + assert(db instanceof DB, 'db not found') + assert(exe, 'exe not found') + + return { db, exe } +} +// TODO remove anyone using atomics except for io +const logger = (prefix: string, pid?: PID) => { + const suffix = pid ? ':' + print(pid) : '' + const string = 'AI:' + prefix + suffix + if (!loggerCache.has(string)) { + const logger = Debug(string) + loggerCache.set(string, logger) + } + const logger = loggerCache.get(string) + assert(logger, 'logger not found') + return logger +} +const loggerCache = new Map void>() diff --git a/_import-artifact/isolates/backchat.ts b/_import-artifact/isolates/backchat.ts new file mode 100644 index 000000000000..d34459af10d6 --- /dev/null +++ b/_import-artifact/isolates/backchat.ts @@ -0,0 +1,179 @@ +import { + backchatIdRegex, + backchatStateSchema, + Functions, + getActorId, + getActorPid, + getThreadPath, + type IA, + pidSchema, + print, + threadIdRegex, + threadSchema, + ToApiType, + unsequencedRequest, +} from '@/constants.ts' +import * as actor from '@/api/isolates/actor.ts' +import { assert, Debug, equal } from '@utils' +import * as longthread from './longthread.ts' +import { z } from 'zod' +import { fixedRandomness } from '@/api/randomness.ts' +const log = Debug('AI:backchat') + +export const parameters = { + newThreadSignal: z.object({}).describe( + 'Signal to create a new thread and set it as the current target thread', + ), + 
newThread: z.object({}).describe( + 'Create a new thread and set it as the current target thread', + ), + changeThreadSignal: pidSchema.describe( + 'Signal to change the current target thread', + ), + changeThread: pidSchema.describe('Change the current target thread'), + changeRemote: z.object({ remote: pidSchema.optional() }).describe( + 'Change the remote of the current target thread', + ), + prompt: z.object({ + content: z.string(), + target: pidSchema.optional(), + attachments: z.array(z.string()).optional(), + }).describe( + 'Send a prompt to the backchat target thread', + ), + relay: z.object({ + request: unsequencedRequest, + }).describe('Relay a request to the given target PID'), +} +export const returns = { + /** stopOnTool command */ + newThreadSignal: z.null(), + /** The threadId of the new thread */ + newThread: z.string().regex(threadIdRegex), + /** stopOnTool command */ + changeThreadSignal: z.null(), + /** The new target thread */ + changeThread: z.void(), + /** The new target thread */ + changeRemote: z.void(), + prompt: z.void(), + relay: z.unknown(), +} +export type Api = ToApiType + +export const functions: Functions = { + newThreadSignal: () => null, + newThread: async (_, api) => { + log('newThread', print(api.pid)) + // TODO generate randomness each execution with incrementation + const { threadCount } = await api.state(backchatStateSchema, { + threadCount: 0, + target: api.pid, + }) + + const backchatId = getBackchatId(api) + const threadId = generateThreadId(threadCount, backchatId) + + const target = getActorPid(api.pid) + const { thread } = await api.actions('actor', { target }) + const pid = await thread({ threadId }) + log('thread started:', print(pid)) + await api.updateState(() => { + return { target: pid, threadCount: threadCount + 1 } + }, backchatStateSchema) + return threadId + }, + changeThreadSignal: () => null, + changeThread: async (target, api) => { + log('changeThread', print(target)) + const thread = await 
api.readJSON(getThreadPath(target), { target }) + threadSchema.parse(thread) + // TODO check other parameters of the thread are suitable as a base target + + await api.updateState((state) => { + return { ...state, target } + }, backchatStateSchema) + }, + changeRemote: async ({ remote }, api) => { + assertBackchatThread(api) + log('changeRemote', print(remote)) + + const { target } = await api.state(backchatStateSchema, { + threadCount: 0, + target: api.pid, + }) + if (equal(target, remote)) { + throw new Error('Remote is same as current target') + } + + const { changeRemote } = await api.actions('longthread', { + target, + }) + await changeRemote({ remote }) + }, + async prompt({ content, target, attachments }, api) { + // TODO handle attachments and empty content + // TODO hit this thread with the topic router + log('prompt: %o', content) + log('threadId: %o attachments: %o', attachments) + assertBackchatThread(api) + + if (!target) { + const state = await api.state(backchatStateSchema, { + threadCount: 0, + target: api.pid, + }) + target = state.target + } + + const actorId = getActorId(api.pid) + + const { route } = await api.actions('longthread', { + target, + }) + const { newThread, changeThread } = await route({ content, actorId }) + if (newThread) { + await functions.newThread({}, api) + } + if (changeThread) { + await functions.changeThread(changeThread, api) + } + }, + relay: ({ request }, api) => { + // TODO replace this with native relay ability + return api.action(request) + }, +} +const assertBackchatThread = (api: IA) => { + assert(api.pid.branches.length === 3, 'Invalid pid') + return getBackchatId(api) +} +const getBackchatId = (api: IA) => { + const backchatId = api.pid.branches[2] + assert(backchatIdRegex.test(backchatId), 'Invalid backchat id') + return backchatId +} + +const generateThreadId = (count: number, backchatId: string) => { + return 'the_' + fixedRandomness(backchatId + '_' + count) +} + +// setting the ID from the state is the only 
way to generate a deterministic id + +// unless we require the caller to supply it, otherwise it will come out +// different each time. + +// or we could use the branch prefix method to just count updwards, however this +// makes the numbers look noisy, since the gap. + +// RNG is actually in the state / io for any given execution, and has the same +// problem as the state, to make sure it is always current for the given +// snapshot in the fs + +// this can be the same kind of storage as what commits were observed by the +// system when running reads for remote systems, and listing branches. + +// sampling the date would be the same, as we would set the start time of the +// commit at commencement of the execution, and then increment it units of 10ms +// as we go along, so that upon replay, we should get the same results ? +// or, at the start, just make it return the same result every time. diff --git a/_import-artifact/isolates/branches.ts b/_import-artifact/isolates/branches.ts new file mode 100644 index 000000000000..129d3b02a492 --- /dev/null +++ b/_import-artifact/isolates/branches.ts @@ -0,0 +1,35 @@ +import { type IA, PID, pidSchema } from '@/constants.ts' + +export const api = { + ls: { + type: 'object', + description: 'List the children of a process', + properties: { + pid: pidSchema, + patterns: { + type: 'array', + items: { + type: 'string', + }, + description: 'The patterns to match', + }, + }, + additionalProperties: false, + }, +} + +interface LsArgs { + pid?: PID + patterns?: string[] +} +export type Api = { + ls: ({ pid, patterns }: LsArgs) => Promise +} +export const functions = { + ls: ({ pid, patterns }: LsArgs, api: IA) => { + if (!pid) { + return api.lsChildren(patterns) + } + return [] + }, +} diff --git a/_import-artifact/isolates/chess.ts b/_import-artifact/isolates/chess.ts new file mode 100644 index 000000000000..adbc96fd9b61 --- /dev/null +++ b/_import-artifact/isolates/chess.ts @@ -0,0 +1,51 @@ +import { z } from 'zod' + +// TODO set 
these up to play each other to conclusion +// simulate users of different skill levels +// have a leader board +// allow gambling on outcomes +// allow bounty for making better bots + +// experiment with prompting the bots to do lookahead in reasoning + +// pull in a chess rules engine to control that allowed moves in the game, +// preferably with helpful descriptions. +// stockfish is the best game engine + +// game is how to prompt an ai bot to play chess the best +// The leaderboard shows the winning bots and has prizes +// shows how stucks work and how royalties work. +// run them under many different scenarios so they have large parallel games. + +export const parameters = z.object({ + start: z.object({}), +}) + +const pieceEnum = z + .enum(['r', 'n', 'b', 'q', 'k', 'p', 'R', 'N', 'B', 'Q', 'K', 'P']) + +const rowSchema = z.array(pieceEnum).length(8) + +const boardSchema = z.array(rowSchema).length(8) + +const castlingRightsSchema = z.object({ + kingside: z.boolean(), + queenside: z.boolean(), +}) + +const castlingSchema = z.object({ + white: castlingRightsSchema, + black: castlingRightsSchema, +}) + +export const stateSchema = z.object({ + board: boardSchema, + turn: z.enum(['w', 'b']), + castling: castlingSchema, + enPassant: z.string().nullable(), + halfmoveClock: z.number().int().gte(0), + fullmoveNumber: z.number().int().gte(1), +}) + +// control what moves are allowed by having a move assessment bot that runs and +// may reject a proposed move back. 
diff --git a/_import-artifact/isolates/fetch.ts b/_import-artifact/isolates/fetch.ts new file mode 100644 index 000000000000..40699fa6dfba --- /dev/null +++ b/_import-artifact/isolates/fetch.ts @@ -0,0 +1,55 @@ +import { Debug } from '@utils' +const log = Debug('isolates:fetch') +export const api = { + // TODO make a readPage function that returns a summary of a url + post: { + description: + `Sends an http post request to the address in the url parameter with everything in the data parameter being sent as the POST data. The results are returned as a json object. For example the following function input parameters: + + const url = 'https://example.com/api/endpoint' + const data = { + param1: 'value1', + param2: 'value2' + } + + would result in the call: + + const requestOptions = { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(data) + } + + fetch(url, requestOptions) + .then(response => response.json()) + + which would return the results as a json object. 
+ `, + type: 'object', + additionalProperties: false, + required: ['url'], + properties: { + url: { type: 'string' }, + data: { type: 'object' }, + }, + }, +} +export const functions = { + post: async (params: { url: string; data?: object }) => { + const { url, data } = params + log('post', url, data) + const requestOptions = { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Accept: 'application/json', + }, + body: undefined as undefined | string, + } + if (data) { + requestOptions.body = JSON.stringify(data) + } + const response = await fetch(url, requestOptions) + return await response.json() + }, +} diff --git a/_import-artifact/isolates/files.ts b/_import-artifact/isolates/files.ts new file mode 100644 index 000000000000..d83d81dd8273 --- /dev/null +++ b/_import-artifact/isolates/files.ts @@ -0,0 +1,79 @@ +import { Debug } from '@utils' +import { Functions, print } from '@/constants.ts' +import { type Api, parameters, returns } from '@/api/isolates/files.ts' +export { type Api, parameters, returns } + +const log = Debug('AI:files') + +export const functions: Functions = { + // TODO this should be a full mirror of the IsolateApi functions + write: ({ path, content = '' }, api) => { + log('add', path, content) + api.write(path, content) + return { charactersWritten: content.length } + }, + ls: async ({ path = '.', count, all }, api) => { + log('ls', path) + let result = await api.ls(path) + if (!all) { + result = result.filter((name) => !name.startsWith('.')) + } + if (count) { + return result.length + } + return result + }, + read: async ({ path }, api) => { + log('read', path, print(api.pid)) + const result = await api.read(path) + log('read result', result) + return result + }, + // TODO migrate this to be linecount updates, not regex + update: async ({ expectedMatches, path, regex, replacement }, api) => { + log('update', expectedMatches, path, regex, replacement) + const contents = await api.read(path) + const { matches, result } = 
replace(contents, regex, replacement) + if (matches.length !== expectedMatches) { + throw new Error( + `Expected ${expectedMatches} matches but found ${matches.length}`, + ) + } + api.write(path, result) + return { matchesUpdated: matches.length } + }, + // TODO make a json update tool that specifies a path in the object to change + // by using a standard json path specification + rm: ({ path }, api) => { + log('rm', path) + api.delete(path) + }, + mv: ({ from, to }, api) => { + log('mv', from, to) + return api.mv(from, to) + }, + cp: ({ from, to }, api) => { + log('cp', from, to) + return api.cp(from, to) + }, + search: async ({ query }, api) => { + log('search', query, print(api.pid)) + + // to start with, this function should just return all the file paths ? + // or, read everything in, and make a call based on the contents of all ? + + // read all files and then pump into context, who cares about the price ? + // or vector store them all based on dir hashes ? + const ls = await api.ls() + return ls.map((path) => ({ path, description: '' })) + }, +} +export const replace = ( + contents: string, + regex: string, + replacement: string, +) => { + const matches = contents.match(new RegExp(regex, 'g')) || [] + const result = contents.replace(new RegExp(regex, 'g'), replacement) + return { matches, result } +} diff --git a/_import-artifact/isolates/github.ts b/_import-artifact/isolates/github.ts new file mode 100644 index 000000000000..fecd45e2ca2b --- /dev/null +++ b/_import-artifact/isolates/github.ts @@ -0,0 +1,162 @@ +import * as Actors from './actors.ts' +import { + addBranches, + colorize, + Functions, + isBaseRepo, + pidSchema, + print, + ToApiType, +} from '@/constants.ts' +import { Backchat } from '../api/client-backchat.ts' +import type { Tokens } from '@deno/kv-oauth' +import { assert, Debug } from '@utils' +import * as files from './files.ts' +import { z } from 'zod' +const log = Debug('AI:github') + +export type Api = ToApiType + +export const parameters 
= { + '@@install': z.object({ homeAddress: pidSchema }), + /** + * Register an attempt to do the oauth loop, so that when the browser comes + * back successfully, we can bind its PAT to the machine public key. + * Recommended validity period is 10 minutes. + * The actorId must have only one machine child, and must be unauthenticated + * with the github provider. + */ + registerAttempt: z.object({ + actorId: z.string(), + authSessionId: z.string(), + }), + /** + * Only allowed for the installation owner / superuser. + * Requires that a sessionId be active and valid. + * Looks up the sessionId from the stored registered attempts. + * Makes a request to github to get the userId. + * + * Merges in the actorId to the primary actorId given by the userId, or + * creates the primary mapping using the current actorId if none exists. + */ + authorize: z.object({ + authSessionId: z.string(), + tokens: z.object({ + accessToken: z.string(), + tokenType: z.string(), + }), + githubUserId: z.string(), + }), + /** + * Deletes the record of the machineId from the actorId. + * ActorId and authorization is determined from the PID of the caller. 
+ */ + signout: z.object({ machineId: z.string() }), +} + +export const returns = { + '@@install': z.void(), + registerAttempt: z.void(), + authorize: z.void(), + signout: z.void(), +} + +export const functions: Functions = { + '@@install': ({ homeAddress }, api) => { + log('install with homeAddress:', print(homeAddress)) + assert(isBaseRepo(api.pid), '@@install not base: ' + print(api.pid)) + api.writeJSON('config.json', { homeAddress: homeAddress }) + }, + registerAttempt: ({ actorId, authSessionId }, api) => { + log('registerAttempt', colorize(actorId), authSessionId) + assert(isBaseRepo(api.pid), 'registerAttempt not base: ' + print(api.pid)) + + // TODO use a branch to store the authsession info + + // should be able to get the api of the remote chain and treat it like local + // api + // api should be the same inside isolate, as outside artifact, as in a + // remote chain + + const filename = 'auth-' + authSessionId + '.json' + api.writeJSON(filename, actorId) + }, + authorize: async ({ authSessionId, tokens, githubUserId }, api) => { + assert(isBaseRepo(api.pid), 'authorize not base: ' + print(api.pid)) + log('authorize', authSessionId, githubUserId) + + const authFilename = 'auth-' + authSessionId + '.json' + if (!api.exists(authFilename)) { + throw new Error('authSessionId not found: ' + authSessionId) + } + const actorId = await api.readJSON(authFilename) + log('actorId', colorize(actorId), actorId) + api.delete(authFilename) + + const target = addBranches(api.pid, githubUserId) + log('target', print(target)) + const path = 'credentials.json' + let credentials: Credentials + + if (await api.isActiveChild(target)) { + // TODO make the api able to be focused remotely + const { write } = await api.actions('files', { target }) + credentials = await api.readJSON(path, { target }) + credentials.tokens[actorId] = tokens + await write({ reasoning: [], path, content: JSON.stringify(credentials) }) + } else { + credentials = { actorId, githubUserId, tokens: { 
[actorId]: tokens } } + log('create branch', print(target)) + const { write } = await api.actions('files', { + noClose: true, + branchName: githubUserId, + }) + await write({ reasoning: [], path, content: JSON.stringify(credentials) }) + log('written') + } + + // TODO add some info about the PAT + // TODO use sharded names like in the machines branch ? + // TODO move to using state + const pointer: ActorPointer = { actorId, githubUserId } + const actorPid = addBranches(api.pid, actorId) + if (await api.isActiveChild(actorPid)) { + throw new Error('actorId already exists: ' + print(actorPid)) + } + const filename = 'pointer.json' + const { write } = await api.actions('files', { + noClose: true, + branchName: actorId, + }) + await write({ + reasoning: [], + path: filename, + content: JSON.stringify(pointer), + }) + log('actorPid', print(actorPid)) + }, + signout: ({ machineId }) => { + throw new Error('not implemented' + machineId) + }, +} +type Credentials = { + actorId: string + githubUserId: string + tokens: { + [machineId: string]: Tokens + } +} +type ActorPointer = Omit + +export const init = async (superuser: Backchat) => { + const { homeAddress } = superuser + const { pid } = await superuser.init({ + repo: 'dreamcatcher-tech/github', + isolate: 'github', + params: { homeAddress }, + }) + log('github installed', print(pid)) + const opts = { target: homeAddress } + const actor = await superuser.actions('actors', opts) + await actor.addAuthProvider({ name: 'github', provider: pid }) +} diff --git a/_import-artifact/isolates/index.ts b/_import-artifact/isolates/index.ts new file mode 100644 index 000000000000..5dbb6e2b02c6 --- /dev/null +++ b/_import-artifact/isolates/index.ts @@ -0,0 +1,74 @@ +import * as system from './system.ts' + +import * as fetch from './fetch.ts' +import * as files from './files.ts' +import * as ioFixture from './io-fixture.ts' +import * as utils from './utils.ts' +import * as artifact from './artifact.ts' +import * as session from 
'./session.ts' + +import * as actors from './actors.ts' +import * as actor from './actor.ts' +import * as machines from './machines.ts' +import * as backchat from './backchat.ts' +import * as isolates from './isolates.ts' +import * as branches from './branches.ts' +import * as agents from './agents.ts' +import * as youtube from './youtube.ts' +import * as github from './github.ts' +import * as threads from './threads.ts' + +import * as completions from './ai-completions.ts' +import * as loadTools from './utils/ai-load-tools.ts' + +import * as longthread from './longthread.ts' +import * as stateboard from './stateboard.ts' + +import * as testCaseRunner from './test-case-runner.ts' +import * as testController from './test-controller.ts' +import * as testRegistry from './test-registry.ts' +import * as tpsReport from './tps-report.ts' + +import * as napps from './napps.ts' + +const isolatesExport = { + system, + + fetch, + files, + 'io-fixture': ioFixture, + utils, + artifact, + session, + + github, + actors, + actor, + machines, + backchat, + isolates, + branches, + agents, + threads, + youtube, + + 'ai-completions': completions, + 'ai-load-tools': loadTools, + + longthread, + stateboard, + + 'test-case-runner': testCaseRunner, + 'test-controller': testController, + 'test-registry': testRegistry, + 'tps-report': tpsReport, + + napps, +} + +export default isolatesExport + +export type Isolate = keyof typeof isolatesExport +export const isIsolate = (isolate: string): isolate is Isolate => { + return isolate in isolatesExport +} diff --git a/_import-artifact/isolates/io-fixture.ts b/_import-artifact/isolates/io-fixture.ts new file mode 100644 index 000000000000..35f60a0c2d23 --- /dev/null +++ b/_import-artifact/isolates/io-fixture.ts @@ -0,0 +1,173 @@ +import { Debug, delay, expect } from '@utils' +import { Functions, pidSchema, ToApiType } from '@/constants.ts' +import { withMeta } from '@/api/types.ts' +import { z } from 'zod' +const log = Debug('AI:io-fixture') + 
export const parameters = {
  write: z.object({
    path: z.string(),
    content: z.string(),
  }),
  writeSlow: z.object({
    path: z.string(),
    content: z.string(),
    delayMs: z.number().int().min(0),
  }).describe('write a file one character at a time'),
  error: z.object({ message: z.string() }).describe('throw an error'),
  branch: z.object({ data: z.string().optional() }),
  compound: z.object({ target: pidSchema }).describe('call another function'),
  parallel: z.object({ count: z.number().int().min(1) }).describe(
    'call local in parallel',
  ),
  squared: z.object({
    count: z.number().int().min(1),
    multiplier: z.number().int().min(1),
  }).describe('call parallel in parallel'),
  fileAccumulation: z.object({
    path: z.string(),
    content: z.string(),
    count: z.number().int().min(1),
  }).describe('accumulate file writes'),
  loopAccumulation: z.object({
    path: z.string(),
    content: z.string(),
    count: z.number().int().min(1),
  }).describe('loop accumulating file writes'),
  pong: z.object({ data: z.string().optional() }),
  local: z.object({}).describe('ping locally'),
  ping: z.object({ message: z.string().optional() }).describe(
    'ping with a message',
  ),
  touch: z.object({
    count: z.number().int().min(1),
    prefix: z.string().optional(),
    suffix: z.string().optional(),
  }).describe('touch a file in the root directory'),
}
export const returns = {
  write: z.void(),
  writeSlow: z.void(),
  error: z.void(),
  branch: z.string(),
  compound: z.string(),
  parallel: z.array(z.string()),
  squared: z.array(z.array(z.string())),
  fileAccumulation: z.void(),
  loopAccumulation: z.void(),
  pong: z.string(),
  local: z.string(),
  ping: z.string().optional(),
  touch: z.void(),
}

// NOTE(review): generic arguments were garbled in transit; reconstructed from
// the ToApiType / Functions convention used by sibling isolates — confirm.
export type Api = ToApiType<typeof parameters, typeof returns>

export const functions: Functions<Api> = {
  /** Write `content` to `path` in a single commit. */
  write: ({ path, content }, api) => {
    log('write', path, content)
    api.write(path, content)
  },
  /** Write the file one character at a time, pausing between writes. */
  async writeSlow({ path, content, delayMs }, api) {
    log('writeSlow', path, content, delayMs)
    let written = ''
    for (const char of content) {
      written += char
      api.write(path, written)
      await delay(delayMs)
    }
    // TODO extend to test with large strings so we can check performance impact
  },
  error: ({ message }) => {
    log('error', message)
    throw new Error(message)
  },
  /** Call pong on a fresh branch and return its reply. */
  branch: async ({ data }, api) => {
    log('branch')
    const { pong } = await api.actions('io-fixture', { branch: true })
    return await pong({ data })
  },
  /** Call pong on the given remote target and return its reply. */
  compound: async ({ target }, api) => {
    log('compound target:', target)
    const { pong } = await api.actions('io-fixture', { target })
    return await pong({})
  },
  /** Fan out `count` local() calls on a branch and gather the replies. */
  parallel: async ({ count }, api) => {
    const { local } = await api.actions('io-fixture', { branch: true })
    const jobs = Array.from({ length: count }, () => local({}))
    return Promise.all(jobs)
  },
  /** Fan out `multiplier` parallel({count}) calls and gather the replies. */
  squared: async ({ count, multiplier }, api) => {
    const { parallel } = await api.actions('io-fixture', { branch: true })
    const jobs = Array.from({ length: multiplier }, () => parallel({ count }))
    return Promise.all(jobs)
  },
  /** Recursively append down/up markers, verifying each recursion commits. */
  fileAccumulation: async ({ path, content, count }, api) => {
    log('fileAccumulation', path, content, count)
    const { fileAccumulation } = await api.actions('io-fixture')
    const remaining = count - 1
    let file = (await api.exists(path)) ? await api.read(path) : ''
    file += `down: ${count} ${content}\n`
    api.write(path, file)
    if (remaining) {
      const { parent } = await withMeta(
        fileAccumulation({ path, content, count: remaining }),
      )
      expect(parent).not.toEqual(api.commit)
    } else {
      log('bottomed out')
    }
    file = await api.read(path)
    log('read:', api.commit, '\n' + file)
    file += `up: ${count} ${content}\n`
    api.write(path, file)
    log('wrote:', '\n' + file)
  },
  /** Repeatedly rewrite the file via actions, checking a commit per loop. */
  loopAccumulation: async ({ path, content: baseContent, count }, api) => {
    log('loopAccumulation', path, baseContent, count)
    log('commit', api.commit)
    api.write(path)

    const { write } = await api.actions('io-fixture')
    let remaining = count
    do {
      const current = await api.read(path)
      log('current', current)
      const content = current + remaining + '\n' + baseContent + '\n'
      log('pre commit', api.commit)
      const { parent } = await withMeta(write({ path, content }))
      expect(parent).not.toEqual(api.commit)
      log('post commit', api.commit)
    } while (remaining--)
  },
  pong: ({ data = 'remote pong' }) => {
    log('pong')
    return data
  },
  local: () => {
    log('local')
    return 'local reply'
  },
  ping: ({ message }) => {
    log('ping', message)
    return message
  },
  /** Create `count` empty files named prefix + index + suffix. */
  touch: ({ count, prefix = '', suffix = '' }, api) => {
    log('touch', count, prefix, suffix)
    for (let i = 0; i < count; i++) {
      api.write(`${prefix}${i}${suffix}`)
    }
  },
}
diff --git a/_import-artifact/isolates/isolates.ts b/_import-artifact/isolates/isolates.ts
new file mode 100644
index 000000000000..847d178442b8
--- /dev/null
+++ b/_import-artifact/isolates/isolates.ts
import { PID } from '@/constants.ts'
interface SearchArgs {
  query: string
}
interface SearchResult {
  isolatePath: string
  functionName: string
  schema: string
}
interface ReadArgs {
  isolate: string
  commands?: string[]
  /** Read the functions object, and convert into a string */
  functions?: boolean
}
interface LoadArgs {
  /** The name of the isolate to load */
  isolate: string
  /** Which function to load within the isolate */
  functionName: string
  /** The address to target with the loaded function call */
  pid: PID
}
export type Api = {
  ls: () => Promise
  /**
   * Response will be a list of the api functions that matched the query,
   * along with the isolate name.
The interpreting bot should then relate + * what the function does to the caller with relevance to the search query + * @param params + * @returns + */ + search: (params: SearchArgs) => Promise + read: (params: ReadArgs) => Promise + load: (params: LoadArgs) => Promise +} + +// can make this run only in the current repo, rather than being a cross repo +// thing ? + +// TODO make it read the code from inside the isolates directory directly. + +// What if we made a file browser that could browse the source code directly ? + +// provide a files:readSystemFiles function that can read from the deployment +// for isolates it would be scoped to the isolates/ directory. + +// then pull in the deploy logs into artifact too, so we can actually read them +// inside the browser, and parse them with AI watchers. +// Batch them up into manageable chunks. + +// to make changes to the prompting of an isolate, it should be able to edit the +// repo source, then do a PR to get it deployed ? + +// then the isolate call can be used to read in the json-schema directly, which +// the bot can then interpret as parameters for function calling, and can help +// with the prompting of the description. + +// when reading the source, it can also check the relevance of the schema +// against the actual code. 
diff --git a/_import-artifact/isolates/longthread.test.ts b/_import-artifact/isolates/longthread.test.ts
new file mode 100644
index 000000000000..0a51fe399251
--- /dev/null
+++ b/_import-artifact/isolates/longthread.test.ts
import { expect, log } from '@utils'
import { getThreadPath } from '../constants.ts'
import * as longthread from './longthread.ts'
import { Backchat } from '../api/client-backchat.ts'
import { assert } from '@std/assert'
import { cradleMaker } from '@/cradle-maker.ts'

const agentMd = `
---
commands:
  - io-fixture:local
  - io-fixture:error
---
Only reply with a SINGLE word
`
const doubleToolCall = `
---
commands:
  - io-fixture:ping
---
`
const errorSwitchboard = `
---
commands:
  - agents:switch
---
call the "agents_switch" function with the argument "agents/missing.md". If this fails for any reason, stop immediately and reply with: "cincinnati".
`
const path = 'agents/agent-fixture.md'

Deno.test('longthread chat', async (t) => {
  await using cradle = await cradleMaker(t, import.meta.url)
  const { backchat } = cradle

  const actorId = 'longthread'

  const longthread = await backchat.actions('longthread')

  await t.step('create longthread', async () => {
    await backchat.write(path, agentMd)
    await longthread.start({})
  })
  await t.step('start twice errors', async () => {
    await expect(longthread.start({})).rejects.toThrow('thread exists')
  })

  await t.step('hello world', async () => {
    const content = 'cheese emoji'
    await longthread.run({ path, content, actorId })
    const result = await backchat.readThread(backchat.pid)
    log('result', result)
    expect(result.messages).toHaveLength(2)
    expect(result.messages[1].content).toBe('🧀')
  })

  await t.step('tool call', async () => {
    resetInstructions(backchat, 'return the function call results')
    const content = 'call the "local" function'

    await longthread.run({ path, content, actorId })

    const result = await backchat.readThread(backchat.pid)
    expect(result).toHaveProperty('toolCommits')
    expect(Object.keys(result.toolCommits)).toHaveLength(1)

    const [assistant] = result.messages.slice(-3)
    assert(assistant.role === 'assistant', 'tool calls missing')
    assert(assistant.tool_calls)

    expect(assistant.tool_calls).toHaveLength(1)
    expect(assistant.tool_calls[0].function).toEqual({
      name: 'io-fixture_local',
      arguments: '{}',
    })
  })
  await t.step('tool error', async () => {
    resetInstructions(backchat, 'return the function call error message')
    const content = 'call the "error" function with message: salami'

    await longthread.run({ path, content, actorId })

    const result = await backchat.readThread(backchat.pid)

    const [assistant, error] = result.messages.slice(-3)
    assert(assistant.role === 'assistant', 'tool calls missing')
    assert(assistant.tool_calls)
    expect(assistant.tool_calls).toHaveLength(1)
    expect(assistant.tool_calls[0].function).toEqual({
      name: 'io-fixture_error',
      arguments: '{"message":"salami"}',
    })

    assert(error.role === 'tool', 'tool calls missing')
    expect(error.content)
      .toBe('{"name":"Error","message":"salami"}')
  })
  await t.step('double tool call', async () => {
    const content =
      'call the "ping" function twice with the message being the integer "1" for the first one and the integer "2" for the second'
    // BUG FIX: the write was not awaited, so the run below could race the
    // agent file update.
    await backchat.write(path, doubleToolCall)

    await longthread.run({ path, content, actorId })
    const result = await backchat.readThread(backchat.pid)

    const [assistant] = result.messages.slice(-4)
    assert(assistant.role === 'assistant', 'tool calls missing')
    assert(assistant.tool_calls)

    expect(assistant.tool_calls).toHaveLength(2)
    const fn0 = assistant.tool_calls[0]
    const fn1 = assistant.tool_calls[1]
    expect(fn0.function).toEqual({
      name: 'io-fixture_ping',
      arguments: '{"message": "1"}',
    })
    expect(fn1.function).toEqual({
      name: 'io-fixture_ping',
      arguments: '{"message": "2"}',
    })
  })
})

Deno.test('test o1 agents', async (t) => {
  await using cradle = await cradleMaker(t, import.meta.url)
  const { backchat } = cradle
  const actorId = 'agents_o1_family'

  const longthread = await backchat.actions('longthread')
  const o1Path = 'agents/o1.md'
  const o1 = Deno.readTextFileSync('./HAL/' + o1Path)
  const o1MiniPath = 'agents/o1-mini.md'
  const o1Mini = Deno.readTextFileSync('./HAL/' + o1MiniPath)

  await backchat.write(o1Path, o1)
  await backchat.write(o1MiniPath, o1Mini)

  await t.step('ask o1', async () => {
    await longthread.start({})
    const content = 'whats in a name ?'
    await longthread.run({ path: o1Path, content, actorId })

    const thread = await backchat.readThread(backchat.pid)
    const assistant = thread.messages.pop()
    assert(assistant)
    expect(assistant.content).toContain('Shakespeare')
  })

  await t.step('ask o1-mini', async () => {
    await backchat.delete(getThreadPath(backchat.pid))
    await longthread.start({})
    const content = 'whats in a name ?'
    // BUG FIX: this step exercised the o1 agent (o1Path) even though it is
    // the o1-mini step; run against the o1-mini agent instead.
    await longthread.run({ path: o1MiniPath, content, actorId })

    const thread = await backchat.readThread(backchat.pid)
    const assistant = thread.messages.pop()
    assert(assistant)
    expect(assistant.content).toContain('Shakespeare')
  })
  await t.step('o1-mini with prior tool calls', async () => {
    await backchat.delete(getThreadPath(backchat.pid))
    await longthread.start({})
    let path = 'agents/files.md'
    let content = 'read the file "agents/creatorBot.md"'
    await longthread.run({ path, content, actorId })

    path = 'agents/o1-mini.md'
    content = 'how could this system prompt be improved ?'
    await longthread.run({ path, content, actorId })

    const thread = await backchat.readThread(backchat.pid)
    const assistant = thread.messages.pop()
    assert(typeof assistant?.content === 'string', 'no assistant')
    expect(assistant.content.toLowerCase()).toContain('creatorbot')
  })
})

Deno.test('switchboard errors', async (t) => {
  await using cradle = await cradleMaker(t, import.meta.url)
  const { backchat } = cradle
  const actorId = 'switchboard_errors'
  const path = 'agents/switchboard.md'

  const longthread = await backchat.actions('longthread')
  await backchat.write(
    path,
    errorSwitchboard,
  )
  await t.step('missing agent', async () => {
    await longthread.start({})
    await longthread.run({ path, content: 'hello', actorId })
    const thread = await backchat.readThread(backchat.pid)
    const assistant = thread.messages.pop()
    assert(typeof assistant?.content === 'string', 'no assistant')
    expect(assistant.content).toBe('cincinnati')
  })
})

Deno.test('router', async (t) => {
  await using cradle = await cradleMaker(t, import.meta.url)
  const { backchat } = cradle
  const actorId = 'test-router'

  const router = Deno.readTextFileSync('./HAL/agents/router.md')
  await backchat.write('agents/router.md', router)

  const longthread = await backchat.actions('longthread')
  await t.step('default agent', async () => {
    await backchat.delete(getThreadPath(backchat.pid))
    await longthread.start({})
    await longthread.route({ content: 'hello', actorId })

    const thread = await backchat.readThread(backchat.pid)
    const assistant = thread.messages.pop()
    assert(assistant?.role === 'assistant', 'no assistant')
    expect(assistant?.name).toBe(thread.agent)
  })
  await t.step('swallow prompt', async () => {
    await backchat.delete(getThreadPath(backchat.pid))
    await longthread.start({})
    await longthread.route({ content: '/o1', actorId })

    const thread = await backchat.readThread(backchat.pid)
expect(thread.agent).toBe('agents/o1.md') + + const tool = thread.messages.pop() + expect(tool?.role).toBe('tool') + expect(tool?.content).toBe('null') + + const assistant = thread.messages.pop() + assert(assistant?.role === 'assistant', 'no assistant') + expect(assistant?.name).toBe('agents/router.md') + + const toolCall = assistant?.tool_calls?.[0] + assert(toolCall, 'missing tool call') + const args = JSON.parse(toolCall.function.arguments) + expect(args.path).toBe('agents/o1.md') + expect(args.swallowPrompt).toBeTruthy() + }) + await t.step('rewritten prompt', async () => { + await backchat.delete(getThreadPath(backchat.pid)) + await longthread.start({}) + await longthread.route({ content: '/o1-mini hello', actorId }) + + const thread = await backchat.readThread(backchat.pid) + expect(thread.agent).toBe('agents/o1-mini.md') + }) +}) + +Deno.test('agents_switch function acts as router', async (t) => { + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat } = cradle + const actorId = 'test-agents_switch' + + const router = Deno.readTextFileSync('./HAL/agents/router.md') + await backchat.write('agents/router.md', router) + const switchboard = Deno.readTextFileSync('./HAL/agents/switchboard.md') + await backchat.write('agents/switchboard.md', switchboard) + + const longthread = await backchat.actions('longthread') + const path = 'agents/switchboard.md' + + await t.step('switchboard to files with no prompt', async () => { + await longthread.start({}) + await longthread.run({ path, content: 'switch to files', actorId }) + + const thread = await backchat.readThread(backchat.pid) + + expect(thread.agent).toBe('agents/files.md') + + const tool = thread.messages.pop() + expect(tool?.role).toBe('tool') + expect(tool?.content).toBe('null') + + const assistant = thread.messages.pop() + assert(assistant?.role === 'assistant', 'no assistant') + expect(assistant?.name).toBe(path) + + const toolCall = assistant?.tool_calls?.[0] + assert(toolCall, 
'missing tool call') + const args = JSON.parse(toolCall.function.arguments) + expect(args.path).toBe('agents/files.md') + expect(args.swallowPrompt).toBeTruthy() + }) + await t.step('switchboard with prompt', async () => { + await backchat.delete(getThreadPath(backchat.pid)) + await longthread.start({}) + + await longthread.run({ path, content: 'list all my files', actorId }) + + const thread = await backchat.readThread(backchat.pid) + + expect(thread.agent).toBe('agents/files.md') + + const assistant = thread.messages.pop() + assert(assistant, 'no assistant') + assert(assistant.role === 'assistant', 'no assistant') + expect(assistant.name).toBe('agents/files.md') + expect(assistant.content).toBeTruthy() + expect(assistant.tool_calls).toBeUndefined() + }) +}) + +const resetInstructions = (backchat: Backchat, instructions: string) => { + const split = agentMd.trim().split('\n') + split.pop() + split.push(instructions) + const newInstructions = split.join('\n') + backchat.write(path, newInstructions) +} diff --git a/_import-artifact/isolates/longthread.ts b/_import-artifact/isolates/longthread.ts new file mode 100644 index 000000000000..55c379618b0c --- /dev/null +++ b/_import-artifact/isolates/longthread.ts @@ -0,0 +1,290 @@ +import { assert, Debug, equal } from '@utils' +import { + CompletionMessage, + getThreadPath, + type IA, + pidSchema, + print, + Returns, + Thread, + ToApiType, +} from '@/constants.ts' +import { ToolMessage } from '@/api/zod.ts' +import { Functions } from '@/constants.ts' +import { executeTools } from './utils/ai-execute-tools.ts' +import { z } from 'zod' +import * as completions from '@/isolates/ai-completions.ts' +import * as agents from '@/isolates/agents.ts' +const log = Debug('AI:longthread') + +export const parameters = { + start: z.object({}).describe('start a new thread for the given agent'), + run: z.object({ + /** Path to the agent to instantiate */ + path: z.string(), + content: z.string(), + actorId: z.string(), + }), + route: 
z.object({
    content: z.string(),
    actorId: z.string(),
  }),
  drone: z.object({
    /** Path to the agent to instantiate */
    path: z.string(),
    content: z.string(),
    actorId: z.string(),
    /** tool names to stop on */
    stopOnTools: z.array(z.string()),
  }),
  changeRemote: z.object({
    remote: pidSchema.optional(),
  }),
}

export const returns: Returns = {
  start: z.void(),
  run: z.void(),
  /** If the tools requested a new thread to be formed, or a change to the
   * backchat target thread */
  route: z.object({
    newThread: z.boolean().optional(),
    changeThread: pidSchema.optional(),
  }).refine((v) => !(v.newThread && v.changeThread), {
    message: 'expected newThread or changeThread exclusively',
  }),
  drone: z.union([
    z.object({ functionName: z.string(), args: z.record(z.unknown()) }),
    z.undefined(),
  ]),
  changeRemote: z.void(),
}

// NOTE(review): the generic arguments below appear stripped by the import
// artifact (bare `ToApiType` / `Functions` are not valid here); restored to
// the pattern used by the sibling isolates — confirm against the original.
export type Api = ToApiType<typeof parameters, typeof returns>

export const functions: Functions<Api> = {
  // Create an empty thread file for this branch; fails if one exists.
  start: async (_, api) => {
    const threadPath = getThreadPath(api.pid)
    log('start', threadPath, print(api.pid))
    assert(!await api.exists(threadPath), `thread exists: ${threadPath}`)
    const thread: Thread = {
      agent: 'agents/reasoner.md',
      messageOffset: 0,
      messages: [],
      messageStats: {},
      toolCommits: {},
      stateboards: [],
      focusedFiles: [],
    }
    api.writeJSON(threadPath, thread)
  },
  // Append the user message, switch the thread to `path`, and run the loop.
  run: async ({ path, content, actorId }, api) => {
    // TODO sniff actorId from the action source
    log('run', { path, content, actorId })
    const threadPath = getThreadPath(api.pid)
    const thread = await api.readThread(threadPath)

    thread.agent = path
    thread.messages.push({ name: actorId, role: 'user', content })
    api.writeJSON(threadPath, thread)
    await loop(path, api, [])
  },
  // Route a prompt: a leading "/" hands the message to the router agent.
  route: async ({ content, actorId }, api) => {
    // TODO handle remote threadIds with symlinks in the threads dir
    // TODO verify stopOnTool function returns null, or throw an error

    const threadPath = getThreadPath(api.pid)
    const thread = await 
api.readThread(threadPath) + thread.messages.push({ name: actorId, role: 'user', content }) + api.writeJSON(threadPath, thread) + const router = `agents/router.md` + + let path = thread.agent + + if (content.trim().startsWith('/')) { + path = router + } + const stopOnTools = [ + 'backchat_newThreadSignal', + 'backchat_changeThreadSignal', + ] + const agentResult = await loop(path, api, stopOnTools) + const result: z.infer = {} + if (agentResult) { + const { functionName } = agentResult + if (functionName === stopOnTools[0]) { + result.newThread = true + } else if (functionName === stopOnTools[1]) { + result.changeThread = api.pid + } + } + // TODO verify the agent can actually end somehow + return result + }, + drone: async ({ path, content, actorId, stopOnTools }, api) => { + // TODO verify stopOnTools are all present in the agent and have the + // expected return value of null + log('drone', path, content, actorId, stopOnTools) + await functions.start({}, api) + + const threadPath = getThreadPath(api.pid) + const thread = await api.readJSON(threadPath) + thread.messages.push({ name: actorId, role: 'user', content }) + thread.agent = path + api.writeJSON(threadPath, thread) + return loop(path, api, stopOnTools) + }, + changeRemote: async ({ remote }, api) => { + const threadPath = getThreadPath(api.pid) + const thread = await api.readThread(threadPath) + + if (equal(thread.remote, remote)) { + return + } + const id = 'call_keVDORgd16XuNBh0PMk5leKP' + thread.messages.push({ + role: 'assistant', + name: 'SYSTEM', + tool_calls: [ + { + type: 'function', + function: { + name: 'SYSTEM_changeRemote', + arguments: JSON.stringify(remote ? 
{ remote } : {}), + }, + id, + }, + ], + }, { role: 'tool', content: 'null', tool_call_id: id }) + + if (remote) { + const opts = { target: remote } + await api.readThread(getThreadPath(remote), opts) + thread.remote = remote + } else { + delete thread.remote + } + api.writeJSON(threadPath, thread) + }, +} + +const loop = async ( + path: string, + api: IA, + stopOnTools: string[], +) => { + const threadPath = getThreadPath(api.pid) + const { complete } = await api.actions('ai-completions') + const HARD_STOP = 50 + let count = 0 + stopOnTools = addDefaults(stopOnTools) + + let workingStops = stopOnTools + + while (!await isDone(threadPath, api, workingStops) && count++ < HARD_STOP) { + await complete({ path }) + if (await isDone(threadPath, api)) { + break + } + // TODO check tool returns are checked against returns schema + await executeTools(threadPath, api, workingStops) + path = await readSwitchedPath(threadPath, api) + workingStops = withConditions(stopOnTools, path) + } + if (count >= HARD_STOP) { + throw new Error('LONGTHREAD hard stop after: ' + HARD_STOP + ' loops') + } + + const thread = await api.readThread(threadPath) + const last = thread.messages[thread.messages.length - 1] + if (last.role === 'tool') { + const assistant = thread.messages[thread.messages.length - 2] + assert(assistant.role === 'assistant', 'not assistant: ' + last.role) + assert(assistant.tool_calls?.length === 1, 'expected one tool call') + + const args = JSON.parse(assistant.tool_calls[0].function.arguments) + const functionName = assistant.tool_calls[0].function.name + log('tool call', functionName, args) + return { functionName, args } + } +} + +const isDone = async (threadPath: string, api: IA, stopOnTools?: string[]) => { + const thread = await api.readJSON(threadPath) + const last = thread.messages[thread.messages.length - 1] + if (!last) { + return false + } + if (last.role === 'user' || last.role === 'assistant') { + log(`${last.role}:${last.name}:`, last.content) + } + if 
(last.role === 'tool') { + const prior = thread.messages[thread.messages.length - 2] + if (isStopOnTool(prior, last, stopOnTools)) { + return true + } + } + if (last.role !== 'assistant') { + return false + } + if ('tool_calls' in last) { + return false + } + return true +} + +const isStopOnTool = ( + prior: CompletionMessage, + tool: ToolMessage, + stopOnTools: string[] = [], +) => { + if (prior.role !== 'assistant') { + return false + } + if (!prior.tool_calls) { + return false + } + if (prior.tool_calls.length !== 1) { + return false + } + if (tool.content !== 'null') { + return false + } + if (prior.tool_calls[0].function.name === 'agents_switch') { + const call = prior.tool_calls[0].function + assert(call.name === 'agents_switch', 'not agents_switch') + const object = JSON.parse(call.arguments) + const args = agents.parameters.switch.parse(object) + log('agents_switch', args) + return !!args.swallowPrompt + } + if (!stopOnTools.includes(prior.tool_calls[0].function.name)) { + return false + } + return true +} + +const addDefaults = (stopOnTools: string[]) => { + const result = [...stopOnTools] + if (!result.includes('utils_resolve')) { + result.push('utils_resolve') + } + if (!result.includes('utils_reject')) { + result.push('utils_reject') + } + return result +} +const readSwitchedPath = async (threadPath: string, api: IA) => { + const thread = await api.readJSON(threadPath) + return thread.agent +} +const withConditions = (stopOnTools: string[], path: string) => { + const stops = [...stopOnTools] + if (path === `agents/backchat.md`) { + stops.push( + 'backchat_newThreadSignal', + 'backchat_changeThreadSignal', + ) + } + return stops +} diff --git a/_import-artifact/isolates/machines.ts b/_import-artifact/isolates/machines.ts new file mode 100644 index 000000000000..f1c3d7ffe0ed --- /dev/null +++ b/_import-artifact/isolates/machines.ts @@ -0,0 +1,53 @@ +import { + actorIdRegex, + addBranches, + Functions, + getRoot, + PID, + ToApiType, +} from 
'@/constants.ts' +import FS from '@/git/fs.ts' +import { assert } from '@utils' +import { z } from 'zod' + +export const parameters = { + upsert: z.object({ + machineId: z.string(), + actorId: z.string(), + }), +} +export const returns = { + upsert: z.void(), +} + +export type Api = ToApiType + +export const functions: Functions = { + upsert({ machineId, actorId }, api) { + const path = shardedPath(MACHINES, 'mac_', machineId) + api.writeJSON(path, actorId) + }, +} +const MACHINES = '.machines/' + +const shardedPath = (folder: string, prefix: string, id: string) => { + assert(folder.endsWith('/'), 'folder must end with /: ' + folder) + const shard = folder + id.slice(0, prefix.length + 2) + const suffix = id.slice(prefix.length + 2) + const path = shard + '/' + suffix + '.json' + return path +} + +export const tryActorId = async (machineId: string, fs: FS) => { + const machinePath = shardedPath(MACHINES, 'mac_', machineId) + if (await fs.exists(machinePath)) { + const actorId = await fs.readJSON(machinePath) + assert(actorIdRegex.test(actorId), 'invalid actor: ' + actorId) + return actorId + } +} + +export const getMachineTarget = (pid: PID) => { + const base = getRoot(pid) + return addBranches(base, 'machines') +} diff --git a/_import-artifact/isolates/napps.ts b/_import-artifact/isolates/napps.ts new file mode 100644 index 000000000000..31ed62a162b2 --- /dev/null +++ b/_import-artifact/isolates/napps.ts @@ -0,0 +1,107 @@ +import { z } from 'zod' +import * as longthread from '@/isolates/longthread.ts' +import { + Functions, + getActorId, + outcomeSchema, + pidSchema, + reasoning, + ToApiType, +} from '@/constants.ts' +import { assert, Debug } from '@utils' + +const log = Debug('AI:napps') + +export const asyncConfig = z.object({ + /** Run this napp on the current thread */ + inline: z.boolean(), + /** Wait for the napp to finish rather than immediately returning */ + synchronous: z.boolean(), + /** Interrupt the calling thread on return, which will trigger a + * 
generation of the current agent. Without this, it will wait until + * the agent runs again to make it aware of the tool call return */ + interrupt: z.boolean(), + /** Restart with the same data as the last call to this napp */ + stateful: z.boolean(), + /** Where to set the virtual root on the filesystem, to restrict what + * the napp can read and write to */ + chroot: z.string(), +}) +// TODO merge in the async config + +const summon = z.object({ + reasoning, + content: z.string(), + /** Identifying name of the napp to call */ + name: z.string(), +}) + +export const parameters = { + /** Call an nApp from inside an llm toolcall where the name of the function + * call is the name of the isolate */ + summon: summon.omit({ name: true }), + /** Call an nApp from inside the executeTools function */ + summonByName: summon, + /** Call a tool that is exported by a nApp */ + tool: z.object({ + /** Identifying name of the tool exported from the napp to call */ + function: z.string(), + /** parameters to call the tool with */ + parameters: z.object({}), + /** Configuration object for running the tool */ + config: z.object({}), + }), + + /** Retrieve the result of a summon or a tool call */ + retrieve: z.object({ id: z.string() }), +} + +// the method of async insertion is that when the napp call finishes, a new +// synthetic tool call is made to retrieve, which pulls in the results if +// foreground is set to true + +export const returns = { + /** the id of the running summon */ + summon: z.void(), + summonByName: pidSchema, + /** the id of the running tool */ + tool: z.string(), + /** retrieve the outcome of a given run */ + retrieve: outcomeSchema, +} + +export type Api = ToApiType + +export const functions: Functions = { + summon() { + throw new Error('llm tool stub must never be executed') + }, + async summonByName({ content, name, reasoning }, api) { + log('nAppCommand', content, name, reasoning) + + // TODO test adding the reasoning to the thread + // perhaps modify the 
prompt in a structured way + + const { start, run } = await api.actions('longthread') + await start({}) + const path = `agents/${name}.md` + const actorId = getActorId(api.pid) + await run({ path, content, actorId }) + + const thread = await api.readThread() + const last = thread.messages.pop() + assert(last, 'no messages in thread') + assert(last.role === 'assistant', 'last message must be assistant') + assert(last.content, 'last message must have content') + assert(typeof last.content === 'string', 'content must be string') + return api.pid + }, + tool({ function: tool, parameters, config }) { + log('nAppTool', tool, parameters, config) + return '' + }, + retrieve({ id }) { + log('nAppRetrieve', id) + return {} + }, +} diff --git a/_import-artifact/isolates/session.ts b/_import-artifact/isolates/session.ts new file mode 100644 index 000000000000..4c1914333b1c --- /dev/null +++ b/_import-artifact/isolates/session.ts @@ -0,0 +1,48 @@ +import { ToApiType } from '@/constants.ts' +import { pidSchema } from '@/constants.ts' +import { Functions, print, ProcessOptions } from '@/constants.ts' +import { assert, Debug } from '@utils' +import { z } from 'zod' +const log = Debug('AI:session') + +export const parameters = { + create: z.object({ + name: z.string().optional(), + prefix: z.string().optional(), + }).refine((v) => Object.keys(v).length <= 1) + .describe('Creat a new session branch'), + noop: z.object({}).describe( + 'a noop that is used to start a long running branch', + ), +} +export const returns = { + create: pidSchema, + noop: pidSchema, +} + +export type Api = ToApiType + +// TODO try kill this whole file + +export const functions: Functions = { + async create({ name, prefix }, api) { + log('create %o', { name, prefix }) + + const options: ProcessOptions = { noClose: true } + if (prefix) { + options.prefix = prefix + } + if (name) { + options.branchName = name + } + const { noop } = await api.actions('session', options) + const pid = await noop({}) + 
assert(pid, 'no pid returned') + log('noop pid returned', print(pid)) + return pid + }, + noop(_: object, api) { + log('noop', print(api.pid)) + return api.pid + }, +} diff --git a/_import-artifact/isolates/stateboard.ts b/_import-artifact/isolates/stateboard.ts new file mode 100644 index 000000000000..49a98eba9a8b --- /dev/null +++ b/_import-artifact/isolates/stateboard.ts @@ -0,0 +1,66 @@ +import { z } from 'zod' +import { Debug } from '@utils' +import { + Functions, + getThreadPath, + pidSchema, + reasoning, + STATEBOARD_WIDGETS, + Thread, +} from '@/constants.ts' +import { ToApiType } from '@/constants.ts' +import { assert } from '@std/assert/assert' + +const log = Debug('AI:stateboard') + +export const parameters = { + show: z + .object({ + reasoning, + pid: pidSchema.optional().describe('defaults to the current pid'), + commit: z + .string() + .optional() + .describe('the commit to show - defaults to the latest commit'), + path: z + .string() + .optional() + .describe('the path to show - defaults to "."'), + // TODO make an allowed list of widgets + widgets: z.array(STATEBOARD_WIDGETS.describe('the widget to show')) + .describe( + 'the stack of widgets to display, in the order of their display', + ), + }) + .describe( + 'Show the given path with the given widget on the stateboard', + ), +} +export const returns = { + show: z.void(), +} + +export type Api = ToApiType + +export const functions: Functions = { + show: async ({ pid, commit, path, widgets }, api) => { + log('show', pid, commit, path, widgets) + const threadPath = getThreadPath(api.pid) + const thread = await api.readThread(threadPath) + const setter = getLastAssistantMessageId(thread) + thread.stateboards.push({ commit: api.commit, setter }) + api.writeJSON(threadPath, thread) + }, +} + +function getLastAssistantMessageId(thread: Thread) { + const messages = [...thread.messages] + while (messages.length > 0) { + const message = messages.pop() + assert(message, 'message should be defined') + if 
(message.role === 'assistant') { + return messages.length + thread.messageOffset + } + } + throw new Error('No assistant message found in thread') +} diff --git a/_import-artifact/isolates/system.ts b/_import-artifact/isolates/system.ts new file mode 100644 index 000000000000..e357dcebf800 --- /dev/null +++ b/_import-artifact/isolates/system.ts @@ -0,0 +1,251 @@ +import FS from '@/git/fs.ts' +import { assert, Debug } from '@utils' +import { + C, + ENTRY_BRANCH, + Functions, + getParent, + type IA, + isPID, + jsonSchema, + md5, + PID, + pidSchema, + print, + Proctype, + Returns, + ToApiType, +} from '@/constants.ts' +import { isBaseRepo } from '@/constants.ts' +import { z } from 'zod' +import type DB from '@/db.ts' +const log = Debug('AI:system') + +const init = z.object({ + repo: z.string(), + isolate: z.string().optional(), + params: z.record(jsonSchema).optional(), +}) +const headResult = z.object({ head: md5, elapsed: z.number().int().gte(0) }) +const pidResult = headResult.extend({ + pid: pidSchema, +}) + +const merge = z.object({ + filepaths: z.array(z.string()).optional(), + overwrite: z.boolean().optional(), +}) + +export const parameters = { + rm: z.object({ pid: pidSchema }).describe('remove a repository'), + clone: init.describe('clone a repository'), + init: init.describe('initialize a repository'), + pull: z.object({ repo: z.string(), target: pidSchema.optional() }).describe( + 'pull a repository into the current branch, or optionally the given PID', + ), + mergeParent: merge.describe( + 'merge this branch into the parent branch. If filepaths are given, only those files will be merged. If overwrite is true, no error will be thrown if any files in the destination are newer than the source.', + ), + mergeGrandParent: merge.describe( + 'merge the parent branch into the grandparent branch', + ), + mergeInternal: z.object({ from: pidSchema, to: pidSchema }).describe( + 'merge one branch into another, within the same repo. 
This is a forceful operation - it will attempt to be graceful, but if that fails, it will overwrite the destination branch with the source branch.', + ), + sideEffectClone: z.object({ repo: z.string() }).describe( + 'clone a repository as a side effect', + ), + sideEffectInit: z.object({ repo: z.string() }).describe( + 'initialize a repository as a side effect', + ), + sideEffectFetch: z.object({ + target: pidSchema, + repo: z.string(), + }).describe('fetch a repository as a side effect'), +} + +export const returns: Returns = { + rm: z.void(), + clone: pidResult, + init: pidResult, + pull: headResult, + mergeParent: headResult, + mergeGrandParent: headResult, + mergeInternal: headResult, + sideEffectClone: pidResult, + sideEffectInit: pidResult, + sideEffectFetch: headResult, +} + +export type Api = ToApiType + +export const functions: Functions = { + init: async ({ repo, isolate, params }, api) => { + // TODO lock so only the actor branch can call this function + const actions = await api.actions('system') + const result = await actions.sideEffectInit({ repo }) + if (isolate) { + await api.action({ + isolate, + // TODO fire an error if this isolate is not installable + functionName: '@@install', + params: params || {}, + proctype: Proctype.enum.SERIAL, + target: result.pid, + }) + log('installed', print(result.pid)) + } + return result + }, + rm: async ({ pid }, api: IA) => { + assert(isPID(pid), 'invalid pid') + assert(isBaseRepo(pid), 'cannot remove a non-base repository') + log('rm', print(pid)) + const { db } = api.context + assert(db, 'db not found') + await db.rm(pid) + }, + clone: async ({ repo, isolate, params }, api) => { + log('clone', repo, isolate, params) + + const actions = await api.actions('system') + const result = await actions.sideEffectClone({ repo }) + if (isolate) { + await api.action({ + isolate, + functionName: '@@install', + params: params || {}, + proctype: Proctype.enum.SERIAL, + target: result.pid, + }) + } + log('cloned %s in %ims', 
print(result.pid), result.elapsed) + return result + }, + pull: async ({ repo, target }, api: IA) => { + target = target || api.pid + log('pull', repo, print(target)) + const actions = await api.actions('system') + log('commit before fetch', api.commit) + const fetchHead = await actions.sideEffectFetch({ target, repo }) + log('commit after fetch', api.commit) + log('fetched', fetchHead) + + const { db } = api.context + assert(db, 'db not found') + const fs = await FS.openHead(target, db) + + if (fs.oid === fetchHead.head) { + log('no changes') + return fetchHead + } + + await forceMerge(db, fs, fetchHead.head) + + log('pulled', repo, print(target)) + return fetchHead + }, + mergeInternal: async ({ from, to }, api: IA) => { + const { db } = api.context + assert(db, 'db not found') + const start = Date.now() + const fromFs = await FS.openHead(from, db) + const toFs = await FS.openHead(to, db) + + let head = toFs.oid + if (toFs.oid === fromFs.oid) { + log('no changes') + } else { + head = await forceMerge(db, toFs, fromFs.oid) + } + + return { head, elapsed: Date.now() - start } + }, + mergeParent: (_, api: IA) => { + const ours = getParent(api.pid) + const theirs = api.commit + return oursMerge(ours, theirs, api) + }, + mergeGrandParent: async (_, api: IA) => { + const { db } = api.context + assert(db, 'db not found') + + const ours = getParent(getParent(api.pid)) + const parentFs = await FS.openHead(getParent(api.pid), db) + return oursMerge(ours, parentFs.oid, api) + }, + sideEffectClone: async ({ repo }, api: IA) => { + // TODO assert we got called by ourselves + const { db } = api.context + assert(db, 'db not found') + const start = Date.now() + const { pid, oid } = await FS.clone(repo, db) + const elapsed = Date.now() - start + return { pid, head: oid, elapsed } + }, + sideEffectInit: async ({ repo }, api: IA) => { + // TODO assert we got called by ourselves + const { db } = api.context + assert(db, 'db not found') + + const [account, repository] = 
repo.split('/') + const partial = { account, repository, branches: [ENTRY_BRANCH] } + + const start = Date.now() + const { pid, oid } = await FS.init(partial, db) + return { pid, head: oid, elapsed: Date.now() - start } + }, + sideEffectFetch: async ({ target, repo }, api: IA) => { + const { db } = api.context + assert(db, 'db not found') + const start = Date.now() + const head = await FS.fetch(repo, target, db) + return { head, elapsed: Date.now() - start } + }, +} +const oursMerge = async (ours: PID, theirs: string, api: IA) => { + // TODO add files and overwrite to the mergeParent function + const { db } = api.context + assert(db, 'db not found') + const start = Date.now() + + const fs = await FS.openHead(ours, db) + const { next } = await fs.theirsMerge(theirs) + + const atomic = await db.atomic().updateHead(ours, fs.oid, next.oid) + assert(atomic, 'update head failed') + if (!await atomic.commit()) { + // TODO try a bit harder to commit + throw new Error('failed to commit: ' + print(ours)) + } + return { head: next.oid, elapsed: Date.now() - start } +} + +const forceMerge = async (db: DB, mergeTarget: FS, incoming: string) => { + let head + try { + head = await mergeTarget.merge(incoming) + } catch (error) { + log('merge error', error) + // TODO try a merge that doesn't fully clobber but keeps whatever we + // have that isn't overwritten by the incoming merge + const result = await mergeTarget.theirsMerge(incoming) + head = result.commit + } + if (head === mergeTarget.oid) { + log('no changes') + return head + } + + const atomic = await db.atomic().updateHead( + mergeTarget.pid, + mergeTarget.oid, + head, + ) + assert(atomic, 'update head failed') + if (!await atomic.commit()) { + // TODO try a bit harder to commit + throw new Error('failed to commit: ' + print(mergeTarget.pid)) + } + return head +} diff --git a/_import-artifact/isolates/test-case-runner.test.ts b/_import-artifact/isolates/test-case-runner.test.ts new file mode 100644 index 
000000000000..25f6d3f345ed --- /dev/null +++ b/_import-artifact/isolates/test-case-runner.test.ts @@ -0,0 +1,70 @@ +import { expect } from '@utils' +import { type Api } from '@/isolates/test-case-runner.ts' +import { fixture } from '@/tests/fixtures/fixture.ts' +import { TestFile, testFile } from '@/api/tps-report.ts' + +Deno.test('test with dependencies', async (t) => { + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + const tpsReport = testFile.parse(tpsFixture) + const tpsPath = 'tests/test-fixture.tps.json' + await backchat.writeJSON(tpsPath, tpsReport) + + const { test } = await backchat.actions('test-case-runner') + + await t.step('run', async () => { + const { path } = tpsReport.summary + await test({ path, cases: [1] }) + const endTps = await backchat.readJSON(tpsPath) + + expect(endTps.cases[1].summary.completed).toBe(1) + }) +}) + +const tpsFixture: TestFile = { + summary: { + timestamp: Date.now(), + path: 'tests/test-fixture.test.md', + hash: '29426920ac614d1672d6f2dfcdd22c0052c22e32', + target: 'agents/agent-fixture.md', + assessor: 'agents/test-assessor.md', + elapsed: 1000, + iterations: 1, + completed: 0, + }, + cases: [ + { + summary: { + name: 'test name 1', + timestamp: Date.now(), + elapsed: 50, + iterations: 1, + dependencies: [], + promptLists: [['call the local function']], + expectations: [ + 'the local function was called', + ], + completed: 0, + successes: [0], + }, + iterations: [], + }, + { + summary: { + name: 'test name 2', + timestamp: Date.now(), + elapsed: 50, + iterations: 1, + dependencies: [0], + promptLists: [['call the local function']], + expectations: ['function was called twice'], + completed: 0, + successes: [0], + }, + iterations: [], + }, + ], +} diff --git a/_import-artifact/isolates/test-case-runner.ts 
b/_import-artifact/isolates/test-case-runner.ts new file mode 100644 index 000000000000..736d1ef29a4d --- /dev/null +++ b/_import-artifact/isolates/test-case-runner.ts @@ -0,0 +1,251 @@ +import { + addPeer, + AssistantMessage, + chatParams, + Functions, + getThreadPath, + type IA, + print, + Thread, + type ToApiType, + withMeta, +} from '@/constants.ts' +import { assert, Debug } from '@utils' +import * as session from '@/isolates/session.ts' +import * as longthread from '@/isolates/longthread.ts' +import { loadAgent } from '@/isolates/utils/load-agent.ts' +import { getChatParams } from '@/isolates/ai-completions.ts' +import { loadTools } from './utils/ai-load-tools.ts' +import { + addCaseResult, + addIteration, + outcome, + testFile, +} from '@/api/tps-report.ts' +import { z } from 'zod' +import { assistantMessage } from '@/api/zod.ts' + +const log = Debug('AI:test-case-runner') + +const testParams = z.object({ + path: z.string().regex(/.*\.test\.md$/).describe( + 'the relative path to the test file that contains the test to be run', + ), + caseIndex: z.number().int().gte(0) + .describe('the index of the test case in the containing test file'), +}) + +export const parameters = { + test: z.object({ + path: testParams.shape.path, + cases: z.array(z.number().int().gte(0)).describe( + 'the indices of the test cases to run from the test file', + ), + }).describe( + 'Runs the test cases at the given indices from the given test file. Execution results are stored in the tps file.', + ), + caseRunner: testParams.describe( + 'The actual implementation of the test runner. 
The test function calls this function in a new branch, and then merges the results back in.', + ), + iteration: testParams.extend({ iterationIndex: z.number().int().gte(0) }), + openai: z.object({ + threadPath: z.string().describe( + 'relative path to the thread to recreate the request response pair from', + ), + }).describe( + 'recreates the exact request that was sent to openai for the last request, as well as the response back from openai. Returns the last request exactly as it was sent to the OpenAI api, and the response from the OpenAI api.', + ), + assessment: outcome, +} + +export const returns = { + test: z.void(), + caseRunner: z.void(), + iteration: z.void(), + assessment: z.null(), + openai: z.object({ + request: chatParams, + response: assistantMessage, + }), +} + +export type Api = ToApiType + +export const functions: Functions = { + test: async ({ path, cases }, api) => { + let ours = await readTpsReport(path, api) + const promises = cases.map(async (caseIndex) => { + const opts = { branchName: 'case_' + caseIndex } + const { caseRunner } = await api.actions('test-case-runner', opts) + const { parent } = await withMeta(caseRunner({ path, caseIndex })) + assert(parent, 'missing parent') + + const theirs = await readTpsReport(path, api, parent) + const caseResult = theirs.cases[caseIndex] + log('case done', caseIndex) + return caseResult + }) + const caseResults = await Promise.all(promises) + for (const caseIndex of cases) { + const caseResult = caseResults.shift() + assert(caseResult, 'missing case result: ' + caseIndex) + ours = addCaseResult(ours, caseIndex, caseResult) + } + api.writeJSON(getTpsPath(path), ours) + }, + caseRunner: async ({ path, caseIndex }, api) => { + log('caseRunner', path, caseIndex, print(api.pid)) + + const file = await readTpsReport(path, api) + const { summary: { iterations } } = file + const testCase = file.cases[caseIndex] + if (testCase.summary.dependencies.length) { + await runDependencies(path, 
testCase.summary.dependencies, api) + } + + // TODO batch the runs to get around artifact limitations in parallelisms + + // TODO make the iterations all run in parallel + + for (let i = 0; i < iterations; i++) { + const opts = { branchName: 'iteration_' + i } + const { iteration } = await api.actions('test-case-runner', opts) + // TODO handle nested dependencies + const promise = iteration({ path, caseIndex, iterationIndex: i }) + const { parent } = await withMeta(promise) + assert(parent, 'missing parent') + + const theirs = await readTpsReport(path, api, parent) + const tpsIteration = theirs.cases[caseIndex].iterations[i] + let ours = await readTpsReport(path, api) + ours = addIteration(ours, caseIndex, i, tpsIteration) + api.writeJSON(getTpsPath(path), ours) + log('iteration done', i) + } + }, + iteration: async ({ path, caseIndex, iterationIndex }, api) => { + log('iteration', path, caseIndex, iterationIndex, print(api.pid)) + + const tpsReport = await readTpsReport(path, api) + const { target, assessor } = tpsReport.summary + const { promptLists, expectations } = tpsReport.cases[caseIndex].summary + + const chain = promptLists[iterationIndex % promptLists.length] + if (promptLists.length <= iterationIndex) { + // if we do not have enough prompts to run the iteration, generate more + // need to get the full test section to use the full context available + // then run this as a drone + } + const actorId = 'iteration_' + iterationIndex + + const { start, run } = await api.functions('longthread') + const before = addPeer(api.pid, 'before') + if (await api.exists(getThreadPath(before))) { + // TODO check this reads in correctly + await api.cp(getThreadPath(before), getThreadPath(api.pid)) + } else { + await start({}) + } + + for (const prompt of chain) { + await run({ path: target, content: prompt, actorId }) + } + + log('starting assessment with:', assessor) + + const threadPath = getThreadPath(api.pid) + const stopOnTools = ['test-case-runner_assessment'] + + 
const promises = expectations.map(async (expectation, index) => { + const opts = { branchName: 'assess_' + index } + const { drone } = await api.actions('longthread', opts) + const content = `threadPath: ${threadPath}\n\nExpectation: ${expectation}` + const path = assessor + const result = await drone({ path, content, actorId, stopOnTools }) + assert(result, 'missing result') + assert(result.functionName === stopOnTools[0], 'unexpected tool call') + return outcome.parse(result.args) + }) + const outcomes = await Promise.all(promises) + + const iteration = { prompts: chain, outcomes, commit: api.commit } + + const updated = addIteration( + tpsReport, + caseIndex, + iterationIndex, + iteration, + ) + + log('writing tps report:', getTpsPath(path)) + api.writeJSON(getTpsPath(path), updated) + }, + assessment: () => null, + openai: async ({ threadPath }, api) => { + const thread = await api.readJSON(threadPath) + const messages = [...thread.messages] + + let response: AssistantMessage | undefined + while (messages.length) { + const message = messages.pop() + assert(message, 'message should be defined') + if (message.role === 'assistant') { + response = message + break + } + } + assert(response, 'response not found') + + const path = response.name + assert(path, 'path missing from last message') + + const agent = await loadAgent(path, api) + const tools = await loadTools(agent, api) + const request = getChatParams(agent, messages, tools) + return { request, response } + }, +} + +const getTpsPath = (testPath: string) => { + assert(testPath.endsWith('.test.md'), 'not .test.md: ' + testPath) + return testPath.replace('.test.md', '.tps.json') +} +const readTpsReport = async (path: string, api: IA, commit?: string) => { + const tpsPath = getTpsPath(path) + return testFile.parse(await api.readJSON(tpsPath, { commit })) +} +const runDependencies = async ( + path: string, + dependencies: number[], + api: IA, +) => { + const noops = { branchName: 'before', noClose: true } + const 
{ noop } = await api.actions('session', noops) + const target = await noop({}) + + const iters = { target } + const { iteration } = await api.actions('test-case-runner', iters) + + let lastParent: string | undefined + for (const caseIndex of dependencies) { + // TODO handle nested dependencies + log('executing dependency:', caseIndex) + // TODO test this is adding on to the same thread + const promise = iteration({ path, caseIndex, iterationIndex: 0 }) + const { parent } = await withMeta(promise) + assert(parent, 'missing parent') + lastParent = parent + + const tps = await readTpsReport(path, api, parent) + assert(tps.cases[caseIndex], 'missing case') + assert(tps.cases[caseIndex].iterations[0], 'missing iteration') + const { outcomes } = tps.cases[caseIndex].iterations[0] + + const failures = outcomes.filter(({ outcome }) => !outcome) + if (failures.length) { + throw new Error('dependencies step failed: ' + JSON.stringify(failures)) + } + } + assert(lastParent, 'missing last parent') + await api.merge(lastParent) +} diff --git a/_import-artifact/isolates/test-controller.ts b/_import-artifact/isolates/test-controller.ts new file mode 100644 index 000000000000..50642b00ae25 --- /dev/null +++ b/_import-artifact/isolates/test-controller.ts @@ -0,0 +1,78 @@ +// in this branch is where the tps reports are stored +// at the start all tps reports would be blanked ? 
+import { z } from 'zod' +import { addBranches, Functions, getActorPid, print } from '@/constants.ts' +import * as longthread from '@/isolates/longthread.ts' +import * as session from '@/isolates/session.ts' +import { Debug, equal } from '@utils' +import { assert } from '@std/assert' +import { ToApiType } from '@/constants.ts' + +const log = Debug('AI:test-controller') + +// so it acts as zod thing, but converts to a json schema +// then the functions api is just functions keyed by the api schema +export const parameters = { + start: z.object({ + controllerId: z.string().describe('The controllerId to run the tests with'), + files: z.array(z.string()).optional(), + cases: z.array(z.string()).optional(), + reset: z.boolean().optional(), + }).describe( + 'A list of file patterns and cases patterns to filter which cases to run. Optionally delete all previous test results', + ), + stop: z.object({ controllerId: z.string() }).describe( + 'Cancel the current run immediately', + ), +} +export const returns = { + start: z.void(), + stop: z.void(), +} +export type Api = ToApiType + +export const functions: Functions = { + start: async ({ controllerId, files = [], cases, reset }, api) => { + log('start', controllerId, files, cases, reset, print(api.pid)) + const actor = getActorPid(api.pid) + const target = addBranches(actor, 'tests', controllerId) + log('controller', print(target)) + + if (!equal(target, api.pid)) { + log('transmitting to target') + const { start } = await api.actions('test-controller', { target }) + + return start({ controllerId, files, cases, reset }) + } + + log('running locally') + assert(api.originCommit, 'no commit') + await api.merge(api.originCommit) + + log('ls', await api.ls('agents/')) + + // this is the generic function of calling a function on a range of files + // resolve all the file names + + const promises = files.map(async (file) => { + log('file', file) + // TODO handle maliciously named branches + const sopts = { branchName: file, 
noClose: true } + const { noop } = await api.actions('session', sopts) + + const lopts = { target: await noop({}) } + const actions = await api.actions('longthread', lopts) + await actions.start({}) + const actorId = 'test-runner' + const path = 'agents/test-file-runner.md' + const content = `${file}` + await actions.run({ path, content, actorId }) + log('done', file) + }) + await Promise.all(promises) + log('done', files) + }, + stop: ({ controllerId }, api) => { + log('stop', controllerId, print(api.pid)) + }, +} diff --git a/_import-artifact/isolates/test-registry.ts b/_import-artifact/isolates/test-registry.ts new file mode 100644 index 000000000000..6967469e1dbb --- /dev/null +++ b/_import-artifact/isolates/test-registry.ts @@ -0,0 +1,69 @@ +import { + addBranches, + Functions, + getActorPid, + getBaseName, + type IA, + print, + ToApiType, +} from '@/constants.ts' +import { Debug } from '@utils' +import * as session from '@/isolates/session.ts' +import { z } from 'zod' + +const log = Debug('AI:test-registry') + +// TODO move to zodObject for all objects +export const parameters = { + '@@install': z.object({}).describe('Ensures the basic branch structure'), + createController: z.object({}) + .describe( + 'Creates a controller in a branch and returns the controllerId', + ), + deleteController: z.object({ + controllerId: z.string().describe('The controllerId to delete'), + }).describe('Deletes the controller and its containing branch'), +} +export const returns = { + '@@install': z.void(), + createController: z.string(), + deleteController: z.void(), +} + +export type Api = ToApiType + +export const functions: Functions = { + '@@install': (_, api) => { + log('installing', print(api.pid)) + }, + createController: async (_, api) => { + console.log('creating controller') + const target = await ensureRegistry(api) + const { noop } = await api.actions('session', { + target, + noClose: true, + prefix: 'ctrl', + }) + const pid = await noop({}) + log('created 
controller', print(pid)) + + return getBaseName(pid) + }, + deleteController: ({ controllerId }, api) => { + console.log('deleting controller', controllerId, print(api.pid)) + }, +} + +const ensureRegistry = async (api: IA) => { + const actor = getActorPid(api.pid) + const registry = addBranches(actor, 'tests') + if (!await api.isPidExists(registry)) { + const actions = await api.actions('test-registry', { + target: actor, + branchName: 'tests', + noClose: true, + }) + await actions['@@install']({}) + } + return registry +} diff --git a/_import-artifact/isolates/threads.ts b/_import-artifact/isolates/threads.ts new file mode 100644 index 000000000000..f2066010888a --- /dev/null +++ b/_import-artifact/isolates/threads.ts @@ -0,0 +1,33 @@ +import { Functions, print, ToApiType } from '@/constants.ts' +import { log } from '@utils' +import { z } from 'zod' + +export const parameters = { + search: z.object({ + query: z.string().describe('The search query'), + after: z.number().int().optional().describe( + 'The timestamp to search after', + ), + before: z.number().int().optional().describe( + 'The timestamp to search before', + ), + }).describe('Search for threads that can complete the job'), +} +export const returns = { + search: z.array( + z.object({ + threadId: z.string(), + description: z.string(), + timestamp: z.number().int(), + }), + ), +} + +export type Api = ToApiType + +export const functions: Functions = { + search: ({ query }, api) => { + log('search', query, print(api.pid)) + return [] + }, +} diff --git a/_import-artifact/isolates/tps-report.test.ts b/_import-artifact/isolates/tps-report.test.ts new file mode 100644 index 000000000000..e47cf7aacbaf --- /dev/null +++ b/_import-artifact/isolates/tps-report.test.ts @@ -0,0 +1,61 @@ +import { createMockApi } from '@/tests/fixtures/mock-api.ts' +import * as tpsReport from '@/isolates/tps-report.ts' +import { expect } from '@std/expect/expect' + +Deno.test('tps-report', async () => { + const { api, stop } = await 
createMockApi('agent/format') + api.read = (path: string) => { + return Promise.resolve(Deno.readTextFileSync('./HAL/' + path)) + } + api.readOid = (_path: string) => { + if (_path === path) { + return Promise.resolve('d41d8cd98f00b204e9800998ecf8427e4902d2c6') + } + throw new Error('file not found') + } + const path = 'tests/test-fixture.test.md' + const target = 'agents/agent-fixture.md' + const assessor = 'agents/test-assessor.md' + const iterations = 0 + + const ok = tpsReport.functions.upsert({ + reasoning: [], + path, + target, + assessor, + iterations, + cases: [], + }, api) + await expect(ok).resolves.not.toThrow() + + const falseTestPath = tpsReport.functions.upsert({ + reasoning: [], + path: 'tests/invalid.test.md', + target, + assessor, + iterations, + cases: [], + }, api) + await expect(falseTestPath).rejects.toThrow('file not found') + + const falseAgent = tpsReport.functions.upsert({ + reasoning: [], + path, + target: 'agents/invalid.md', + assessor, + iterations, + cases: [], + }, api) + await expect(falseAgent).rejects.toThrow('agents/invalid.md') + + const falseAssessor = tpsReport.functions.upsert({ + reasoning: [], + path, + target, + assessor: 'agents/invalid.md', + iterations, + cases: [], + }, api) + await expect(falseAssessor).rejects.toThrow('agents/invalid.md') + stop() +}) diff --git a/_import-artifact/isolates/tps-report.ts b/_import-artifact/isolates/tps-report.ts new file mode 100644 index 000000000000..0de2cb012521 --- /dev/null +++ b/_import-artifact/isolates/tps-report.ts @@ -0,0 +1,78 @@ +import { assert } from '@utils' +import { z } from 'zod' +import { Functions, reasoning, Returns, type ToApiType } from '@/constants.ts' +import { Debug } from '@utils' +import { + addCase, + create, + testCaseSummary, + testFileSummary, +} from '@/api/tps-report.ts' +import { loadAgent } from '@/isolates/utils/load-agent.ts' + +const log = Debug('AI:tps-report') + +const caseItem = z.object({ reasoning }).merge( + testCaseSummary.pick({ + name: 
true, + promptLists: true, + expectations: true, + dependencies: true, + }), +).extend({ + dependencies: z.array(z.number().int()).describe( + testCaseSummary.shape.dependencies.description || '', + ), +}) + +// remove restricted schema keywords for structured outputs +const path = z.string().describe(testFileSummary.shape.path.description || '') +const iterations = z + .number() + .int() + .describe(testFileSummary.shape.iterations.description || '') + +const upsert = z.object({ + reasoning, + path, + target: testFileSummary.shape.target, + assessor: testFileSummary.shape.assessor, + iterations, + cases: z.array(caseItem), +}).describe( + 'Create or update a test report for the given testPath and iterations', +) + +export const parameters = { upsert } + +export const returns: Returns = { upsert: z.void() } + +export type Api = ToApiType + +export const functions: Functions = { + upsert: async ({ path, target, assessor, iterations, cases }, api) => { + log('upsertTpsReport', path, iterations) + await loadAgent(target, api) + await loadAgent(assessor, api) + const tpsPath = getTpsPath(path) + const hash = await api.readOid(path) + let tpsReport = create(path, target, assessor, iterations, hash) + + for (const { name, promptLists, expectations, dependencies } of cases) { + tpsReport = addCase( + tpsReport, + name, + promptLists, + expectations, + dependencies, + ) + } + log('writing tps report:', tpsPath) + api.writeJSON(tpsPath, tpsReport) + }, +} + +const getTpsPath = (path: string) => { + assert(path.endsWith('.test.md'), 'testPath must end with .test.md') + return path.replace('.test.md', '.tps.json') +} diff --git a/_import-artifact/isolates/utils.ts b/_import-artifact/isolates/utils.ts new file mode 100644 index 000000000000..944dd33b76ce --- /dev/null +++ b/_import-artifact/isolates/utils.ts @@ -0,0 +1,40 @@ +import { Functions, ToApiType } from '@/constants.ts' +import { z } from 'zod' +export const parameters = { + delay: z.object({ + milliseconds: 
z.number().int().gte(1).describe( + 'The number of milliseconds to delay the execution of the next command', + ), + }).describe( + 'Delays the execution of the next command by the specified number of milliseconds and then returns the current date and time in the format used by the system locale.', + ), + resolve: z.object({}).passthrough().describe( + 'Used by drones to signal the successful completion of a task. Can be called with any properties at all. MUST BE CALLED AS A SINGLE TOOL CALL and NEVER in a parallel tool call.', + ), + reject: z.object({ message: z.string() }).passthrough().describe( + 'Used by drones to signal the unsuccessful completion of a task. Can be called with any properties at all. MUST BE CALLED AS A SINGLE TOOL CALL and NEVER in a parallel tool call.', + ), + time: z.object({}).describe( + 'Returns the current date and time in ISO 8601 format with zero timezone offset', + ), +} +export const returns = { + delay: z.string().describe( + 'The current date and time in the format used by the system locale', + ), + resolve: z.null(), + reject: z.null(), + time: z.string().datetime(), +} + +export type Api = ToApiType + +export const functions: Functions = { + delay: async ({ milliseconds }) => { + await new Promise((resolve) => setTimeout(resolve, milliseconds)) + return new Date().toLocaleString() + }, + resolve: () => null, + reject: () => null, + time: () => new Date().toISOString(), +} diff --git a/_import-artifact/isolates/utils/ai-execute-tools.ts b/_import-artifact/isolates/utils/ai-execute-tools.ts new file mode 100644 index 000000000000..d94f9ac2f5fa --- /dev/null +++ b/_import-artifact/isolates/utils/ai-execute-tools.ts @@ -0,0 +1,103 @@ +import { assert } from '@std/assert' +import { Debug } from '@utils' +import { serializeError } from 'serialize-error' +import { type IA, Thread } from '@/constants.ts' +import { loadActions } from './ai-load-tools.ts' +import { loadAgent } from './load-agent.ts' +const base = 'AI:execute-tools' 
+const log = Debug(base) +const debugToolCall = Debug(base + ':ai-result-tool') +const debugToolResult = Debug(base + ':ai-tool-result') + +const loadToolCalls = async (threadPath: string, api: IA) => { + const thread = await api.readJSON(threadPath) + + const assistant = thread.messages[thread.messages.length - 1] + assert('tool_calls' in assistant, 'missing tool calls') + assert(Array.isArray(assistant.tool_calls), 'tool calls must be an array') + assert(assistant.name, 'missing assistant name') + const { tool_calls } = assistant + + const agent = await loadAgent(assistant.name, api) + return { tool_calls, agent, thread } +} + +export const executeTools = async ( + threadPath: string, + api: IA, + stopOnTools: string[], +) => { + const calls = await loadToolCalls(threadPath, api) + const { tool_calls, agent } = calls + + const actions = await loadActions(agent, api) + const logNames = tool_calls.map((call) => call.function.name) + log('execute tools:', threadPath, logNames) + + // TODO use the new openai runTools helper with a parser + let isErrored = false + for (const call of tool_calls) { + const { function: { name, arguments: args }, id: tool_call_id } = call + debugToolCall(name, args) + log('tool call:', name, JSON.parse(args)) + assert(actions[name], `missing action: ${name}`) + const message: Thread['messages'][number] = { + role: 'tool', + tool_call_id, + content: '', + } + + if (isErrored) { + const error = new Error( + 'tool call skipped due to prior error in this batch of parallel tool calls, which are always executed sequentially', + ) + // TODO validate the params to pass back extra info + log('tool call error:', error) + const serializable = serializeError(error) + delete serializable.stack + message.content = JSON.stringify(serializable) + } else { + try { + const parameters = JSON.parse(args) + const result = await actions[name](parameters) + + if (result === undefined || typeof result === 'string') { + message.content = 
compressIfPrettyJson(result || '') + } else { + message.content = JSON.stringify(result) + } + log('tool call result:', name, 'size:', message.content.length) + + if (stopOnTools.includes(name)) { + if (tool_calls.length !== 1) { + const msg = + `Tool ${name} cannot be called in parallel with other tools` + throw new Error(msg) + } + } + } catch (error) { + log('tool call error:', error) + isErrored = true + const serializable = serializeError(error) + delete serializable.stack + message.content = JSON.stringify(serializable) + } + } + debugToolResult(message.content) + + const thread = await api.readJSON(threadPath) + thread.messages.push(message) + thread.toolCommits[tool_call_id] = api.commit + api.writeJSON(threadPath, thread) + } + log('done') +} + +const compressIfPrettyJson = (content: string) => { + try { + const json = JSON.parse(content) + return JSON.stringify(json) + } catch { + return content + } +} diff --git a/_import-artifact/isolates/utils/ai-load-tools.ts b/_import-artifact/isolates/utils/ai-load-tools.ts new file mode 100644 index 000000000000..b4a2532c3f73 --- /dev/null +++ b/_import-artifact/isolates/utils/ai-load-tools.ts @@ -0,0 +1,138 @@ +import { assert, Debug } from '@utils' +import { loadAgent } from '@/isolates/utils/load-agent.ts' +import type OpenAI from 'openai' +import { zodFunction } from 'openai/helpers/zod' +import { + Agent, + getThreadPath, + type IA, + Params, + RpcOpts, + type Thread, + withMeta, +} from '@/constants.ts' +import { isIsolate } from '../index.ts' +import * as tps from '@/isolates/tps-report.ts' +import * as napps from '@/isolates/napps.ts' +import type { z } from 'zod' +const log = Debug('AI:tools:load-tools') + +export const loadTools = async (agent: Agent, api: IA) => { + const result = await load(agent, api) + return result.tools +} +export const loadActions = async (agent: Agent, api: IA) => { + const result = await load(agent, api) + return result.actions +} + +type PlainAction = ( + params: Params, +) => 
Promise +type SummonAction = ( + params: z.infer, +) => Promise +type Action = PlainAction | SummonAction + +const load = async (agent: Agent, api: IA) => { + const tools: OpenAI.ChatCompletionTool[] = [] + const actions: Record = {} + + for (const cmd of agent.commands) { + if (!cmd.includes(':')) { + throw new Error('Invalid command: ' + cmd) + } + log('loading command:', cmd) + const [isolate, functionName] = cmd.split(':') + assert(isIsolate(isolate), `missing isolate: ${isolate}`) + const name = isolate + '_' + functionName + const schema = await api.apiSchema(isolate) + assert(functionName in schema, `isolate missing command: ${cmd}`) + const action = async (params: Params) => { + const actions = await api.actions(isolate) + assert(actions[functionName], `missing action: ${cmd}`) + return actions[functionName](params) + } + const tool = isolateToGptApi(name, schema[functionName]) + + assert(action, `missing action: ${cmd}`) + assert(typeof action === 'function', `invalid action: ${action}`) + assert(!actions[name], `duplicate action: ${cmd}`) + actions[name] = action + assert(typeof tool === 'object', `invalid tool: ${tool}`) + tools.push(tool) + } + + for (const name of agent.napps) { + assert(!actions[name], `duplicate action: ${name}`) + assert(await api.exists('agents/' + name + '.md'), `missing agent: ${name}`) + + const fn: SummonAction = async ({ content, reasoning }) => { + const opts: RpcOpts = { prefix: 'napp-' + name } + const { summonByName } = await api.actions('napps', opts) + log('summoning napp:', name, content, reasoning) + + const promise = summonByName({ content, reasoning, name }) + const { result, parent } = await withMeta(promise) + + const threadPath = getThreadPath(result) + const thread = await api.readThread(threadPath, { commit: parent }) + return getLastContent(thread) + } + actions[name] = fn + + const agent = await loadAgent('agents/' + name + '.md', api) + const tool = zodFunction({ + name, + parameters: napps.parameters.summon, 
+ description: agent.description, + }) + tools.push(tool) + } + + return { tools, actions } +} +const isolateToGptApi = (name: string, schema: object) => { + assert(typeof schema === 'object', `api must be an object: ${name}`) + assert('type' in schema, `api must have a type: ${name}`) + assert(typeof schema.type === 'string', `api.type must be a string: ${name}`) + assert('description' in schema, `api must have a description: ${name}`) + assert(typeof schema.description === 'string', `api.description not string`) + + const parameters: Record = { ...schema } + delete parameters.title + delete parameters.description + const strict = name === 'tps-report_upsert' ? true : false + + if (name === 'tps-report_upsert') { + const result = zodFunction({ + name: 'tps-report_upsert', + parameters: tps.parameters.upsert, + }) + return result + } + + const tool: OpenAI.ChatCompletionTool = { + type: 'function', + function: { + name, + description: schema.description, + strict, + parameters, + }, + } + return tool +} + +const getLastContent = (thread: Thread): string => { + const messages = [...thread.messages] + while (messages.length > 0) { + const message = messages.pop() + assert(message, 'message should be defined') + if (message.role === 'assistant' || message.role === 'tool') { + assert(typeof message.content === 'string', 'content not string') + return message.content + } + } + throw new Error('No assistant message found in thread') +} diff --git a/_import-artifact/isolates/utils/load-agent.test.ts b/_import-artifact/isolates/utils/load-agent.test.ts new file mode 100644 index 000000000000..f1e27cca0d8a --- /dev/null +++ b/_import-artifact/isolates/utils/load-agent.test.ts @@ -0,0 +1,120 @@ +import { expect } from '@utils' +import { assert } from '@std/assert' +import { createMockApi } from '@/tests/fixtures/mock-api.ts' +import { loadAgent } from '@/isolates/utils/load-agent.ts' + +Deno.test('format checking', async (t) => { + const { api, stop } = await 
createMockApi('agent/format') + + const path = 'agents/agent-fixture.md' + await t.step('fixture', async () => { + api.write(path, agentMd) + const agent = await loadAgent(path, api) + assert(agent.config) + expect(agent.config.model).toBe('gpt-3.5-turbo') + expect(agent.runner).toBe('ai-runner') + expect(agent.commands).toEqual(['io-fixture:local', 'io-fixture:error']) + expect(agent.instructions).toEqual('ALWAYS be as brief as possible') + }) + await t.step('no front-matter', async () => { + api.write(path, 'HELLO') + const agent = await loadAgent(path, api) + expect(agent.config).toBeDefined() + }) + await t.step('erroneous config', async () => { + api.write(path, '---\nconfig: 123\n---') + await expect(loadAgent(path, api)) + .rejects.toThrow('Error parsing agent: ' + path) + }) + await t.step('missing agent', async () => { + await expect(loadAgent('agents/missing.md', api)) + .rejects.toThrow('Could not find agents/missing.md') + }) + + stop() +}) + +const agentMd = ` +--- +config: + model: gpt-3.5-turbo +commands: + - io-fixture:local + - io-fixture:error +--- + +ALWAYS be as brief as possible + +` + +Deno.test('expand md links', async (t) => { + const { api, stop } = await createMockApi('agent/format') + const path = 'agents/agent-fixture.md' + + await t.step('linked agent', async () => { + api.write(path, linkedAgent) + api.write('testFile.md', testFile) + + const agent = await loadAgent(path, api) + expect(agent.instructions).toEqual(testFile) + }) + await t.step('nested links', async () => { + api.write(path, nested) + api.write('nested.md', linkedAgent) + api.write('testFile.md', testFile) + + const agent = await loadAgent(path, api) + expect(agent.instructions).toEqual(testFile) + }) + await t.step('multiple links', async () => { + api.write(path, multiple) + api.write('testFile.md', testFile) + + const agent = await loadAgent(path, api) + expect(agent.instructions).toEqual(testFile + testFile) + }) + await t.step('recursive links', async () => { + // 
if a loop is detected, + const A = `[](B.md)` + api.write(path, A) + const B = `[](${path})` + api.write('B.md', B) + await expect(loadAgent(path, api)).rejects.toThrow('circular reference') + }) + // TODO test special chars messing up the link regex + // TODO test paths are relative to the loaded path + stop() +}) + +const linkedAgent = ` +[something](testFile.md) +` +const nested = `[](nested.md)` +const testFile = `THIS IS A TEST` +const multiple = `[](testFile.md)[](./testFile.md)` + +Deno.test('load agents', async () => { + const { api, stop } = await createMockApi('agent/format') + api.read = (path: string) => + Promise.resolve( + Deno.readTextFileSync('./HAL/' + path), + ) + api.exists = async (path: string) => { + try { + await Deno.stat('./HAL/' + path) + return true + } catch { + return false + } + } + + const all = Deno.readDirSync('./HAL/agents') + for (const entry of all) { + if (!entry.isFile) { + throw new Error('not a file: ' + entry.name) + } + await loadAgent('agents/' + entry.name, api) + } + + stop() +}) diff --git a/_import-artifact/isolates/utils/load-agent.ts b/_import-artifact/isolates/utils/load-agent.ts new file mode 100644 index 000000000000..e3ef4efe3327 --- /dev/null +++ b/_import-artifact/isolates/utils/load-agent.ts @@ -0,0 +1,136 @@ +import merge from 'lodash.merge' +import { assert, posix } from '@utils' +import { AGENT_RUNNERS, agentSchema, type IA, Triad } from '@/constants.ts' +import { type Agent } from '@/constants.ts' +import matter from 'gray-matter' +import { loadTools } from '@/isolates/utils/ai-load-tools.ts' + +export const loadString = async (path: string, string: string, api: IA) => { + const name = posix.basename(path, posix.extname(path)) + const { pid, commit } = api + const source: Triad = { path, pid, commit } + + const { data, content } = matter(string.trim()) + assert(typeof content === 'string', 'content missing') + let instructions + try { + instructions = await expandLinks(content, api) + } catch (error) { + 
const message = error instanceof Error ? error.message : 'unknown error'
    throw new Error('Error expanding links: ' + path + '\n' + message)
  }

  // Defaults for every agent; front-matter (`data`) is merged over these below.
  const base: Agent = {
    name,
    source,
    runner: AGENT_RUNNERS.CHAT,
    instructions,
    config: {
      model: 'gpt-4o-mini',
      tool_choice: 'auto',
      parallel_tool_calls: true,
    },
    commands: [],
    napps: [],
  }

  merge(base, data)
  // o1Checker = agentSchema plus a refinement rejecting instructions on
  // o1-preview / o1-mini models (defined later in this file).
  const { success, data: agent, error } = o1Checker.safeParse(base)
  if (!success) {
    throw new Error('Error parsing agent: ' + path + '\n' + error.message)
  }

  const toolsPromise = loadTools(agent, api)

  try {
    await toolsPromise
    return agent
  } catch (error) {
    const message = error instanceof Error ? error.message : 'unknown error'
    throw new Error('Error parsing agent: ' + path + '\n' + message)
  }
}

/**
 * Read an agent definition (markdown with optional front-matter) from `path`
 * and return the parsed, schema-validated Agent.
 * Throws if the path does not end in `.md`, cannot be read, or fails
 * front-matter / schema / tool validation.
 */
export const loadAgent = async (path: string, api: IA) => {
  assert(path.endsWith('.md'), 'path must end with .md')
  const string = await api.read(path)
  const agent = await loadString(path, string, api)
  return agentSchema.parse(agent)
}

/**
 * Load every `.md` agent found in `dir`.
 * Returns `{ path, agent }` pairs where `path` is the full path that was
 * loaded (`dir` joined with the entry name).
 */
export const loadAll = async (dir: string, api: IA) => {
  // TODO provide globs
  const agents: { path: string; agent: Agent }[] = []
  const files = await api.ls(dir)
  for (const file of files) {
    if (file.endsWith('.md')) {
      // BUGFIX: previously `posix.basename(file, posix.extname(file))`
      // stripped both the directory and the `.md` extension, so the
      // loadAgent() call below always failed its
      // `path must end with .md` assertion. Join dir + file instead.
      // NOTE(review): assumes api.ls returns entry names relative to
      // `dir` — confirm against the IA implementation.
      const path = posix.join(dir, file)
      const agent = await loadAgent(path, api)
      agents.push({ path, agent })
    }
  }
  return agents
}

/**
 * Recursively expand markdown links (`[label](file.md)`) into the linked
 * files' contents. `path` is the chain of links already being expanded,
 * used by loadContent to detect circular references.
 */
const expandLinks = async (content: string, api: IA, path: string[] = []) => {
  content = content.trim()
  const links = extractLinks(content)
  const contents = await loadContent(links, api, path)
  const result = replaceLinksWithContent(content, links, contents)
  return result.trim()
}
// Fetch and recursively expand each linked file; throws on a link cycle.
const loadContent = async (links: string[], api: IA, path: string[]) => {
  const contents = new Map()
  for (const link of links) {
    if (path.includes(link)) {
      throw new Error('circular reference: ' + path.join(' -> '))
    }
    const content = await
api.read(link) + contents.set(link, await expandLinks(content, api, path.concat(link))) + } + return contents +} + +const extractLinks = (content: string): string[] => { + const linkRegex = /\[.*?\]\((.*?\.md)\)/g + const plain = content.split(codeBlockRegex).filter((_, i) => i % 2 === 0) + + const links = [] + for (const section of plain) { + let match + while ((match = linkRegex.exec(section)) !== null) { + links.push(match[1]) + } + } + return links +} + +const codeBlockRegex = /(```[\s\S]*?```|`[^`\n]*`)/g +const replaceLinksWithContent = ( + content: string, + links: string[], + contents: Map, +) => { + const parts = content.split(codeBlockRegex) + for (let i = 0; i < parts.length; i++) { + if (i % 2 === 0) { + for (const link of links) { + const linkRegex = new RegExp(`\\[.*?\\]\\(${link}\\)`, 'g') + const resolved = contents.get(link) + assert(resolved, 'missing content for link: ' + link) + parts[i] = parts[i].replace(linkRegex, resolved) + } + } + } + + return parts.join('') +} + +const o1Checker = agentSchema.refine((data) => { + if (data.instructions) { + if (['o1-preview', 'o1-mini'].includes(data.config.model)) { + return !data.instructions + } + } + return true +}, 'instructions are not allowed for o1 models') diff --git a/_import-artifact/isolates/utils/mocker.test.ts b/_import-artifact/isolates/utils/mocker.test.ts new file mode 100644 index 000000000000..71df53cdf3c4 --- /dev/null +++ b/_import-artifact/isolates/utils/mocker.test.ts @@ -0,0 +1,152 @@ +import { + getFilename, + mockCreator, + removeSnapshotsFile, +} from '@/isolates/utils/mocker.ts' +import { z } from 'zod' +import { expect } from '@utils' + +Deno.test('inject inside a test', (t) => { + const mock = mockCreator(z.string()) + mock.useRecorder(t) + + const id = 'test-id' + mock.inject(id, '1') + mock.inject(id, '2') + + let callbackCount = 0 + let payload + mock.subscribe((_payload, id) => { + callbackCount++ + payload = _payload + expect(id).toEqual('test-id') + }) + + 
expect(mock.next(id)).toEqual('1')
  expect(callbackCount).toEqual(1)
  expect(payload).toEqual('1')

  // injections drain FIFO: second next() yields the second injected value
  expect(mock.next(id)).toEqual('2')
  expect(callbackCount).toEqual(2)
  expect(payload).toEqual('2')

  mock.teardown()
})

Deno.test('record results to file', async (t) => {
  const schema = z.object({ test: z.string() })
  const writer = mockCreator(schema)

  const id = 'test-actor-id'
  // store() without an active recorder is a no-op, so next() sees nothing
  writer.store(id, { test: '1' })
  expect(writer.next(id)).toBeUndefined()

  removeSnapshotsFile(t)

  // with the recorder active, store() persists to the snapshot file
  writer.useRecorder(t)
  writer.store(id, { test: '2' })

  writer.teardown()

  // a fresh mock with a recorder replays what the writer persisted
  const reader = mockCreator(schema)
  reader.useRecorder(t)
  expect(reader.next(id)).toEqual({ test: '2' })
  expect(reader.next(id)).toBeUndefined()

  reader.teardown()

  await t.step('nested', (t) => {
    // nested steps record under their own path within the snapshot file
    const id = 'nested-id'
    const writer = mockCreator(schema)
    writer.useRecorder(t)
    writer.store(id, { test: '3' })
    writer.teardown()

    const reader = mockCreator(schema)
    reader.useRecorder(t)
    expect(reader.next(id)).toEqual({ test: '3' })
    expect(reader.next(id)).toBeUndefined()
    reader.teardown()
  })

  removeSnapshotsFile(t)
})

Deno.test('double step', async (t) => {
  removeSnapshotsFile(t)
  const mock = mockCreator(z.string())

  // two sibling steps with the same name each get their own recorder
  // session (useRecorder/teardown pairs must not overlap)
  await t.step('substep', (t) => {
    mock.useRecorder(t)
    const id = 'test-id-1'
    mock.store(id, '1')
    mock.teardown()
  })

  await t.step('substep', (t) => {
    mock.useRecorder(t)
    const id = 'test-id-2'
    mock.store(id, '2')
    mock.teardown()
  })
})

const firstName = 'first top level test'
Deno.test(firstName, (t) => {
  const mock = mockCreator(z.string())
  mock.useRecorder(t)
  const id = 'test-id'
  mock.store(id, '1')
  mock.teardown()
})
const secondName = 'second top level test'
Deno.test(secondName, async (t) => {
  const mock = mockCreator(z.string())
  mock.useRecorder(t)
  const id = 'test-id'
  mock.store(id, '2')
  mock.teardown()

  await t.step('check file', () => {
    // both top-level tests share one snapshot file, keyed by test name
    const filename = getFilename(t)
const text = Deno.readTextFileSync(filename) + const json = JSON.parse(text) + expect(json).toHaveProperty(firstName) + expect(json).toHaveProperty(secondName) + }) +}) + +Deno.test('duplicate recording fails', async (t) => { + const mock = mockCreator(z.string()) + mock.useRecorder(t) + + await t.step('substep', (t) => { + const mock = mockCreator(z.string()) + expect(() => mock.useRecorder(t)).toThrow('recorder already active') + }) + mock.teardown() +}) + +Deno.test('different schemas', (t) => { + removeSnapshotsFile(t) + const schema1 = z.string() + const schema2 = z.number() + + const mock = mockCreator(schema1) + mock.useRecorder(t) + + const id1 = 'test-id1' + mock.store(id1, '1') + mock.teardown() + + const mock2 = mockCreator(schema2) + mock2.useRecorder(t) + + const id2 = 'test-id2' + mock2.store(id2, 2) + mock2.teardown() +}) + +Deno.test('cleanup', (t) => { + removeSnapshotsFile(t) +}) diff --git a/_import-artifact/isolates/utils/mocker.ts b/_import-artifact/isolates/utils/mocker.ts new file mode 100644 index 000000000000..8fa49f875a97 --- /dev/null +++ b/_import-artifact/isolates/utils/mocker.ts @@ -0,0 +1,208 @@ +import { basename, dirname, fromFileUrl } from '@std/path' +import { z, ZodError } from 'zod' +import get from 'lodash.get' +import set from 'lodash.set' +import { Debug } from '@utils' +import { + _disableDeterministicMockMode, + _enableDeterministicMockMode, +} from '@/api/randomness.ts' +const log = Debug('AI:mocker') +type TestContext = Deno.TestContext + +let testContext: TestContext | undefined +const subscribers = new Set() +const injections = new Map() + +const id = z.string() +type Subscriber = ( + message: unknown | undefined, + id: string, +) => void + +export const mockCreator = (messageSchema: T) => { + type Mock = { + inject: (id: string, message: z.infer) => void + next: (id: string) => z.infer | undefined + useRecorder: (t: TestContext, update?: 'updateSnapshots') => void + store: (id: string, message: z.infer) => void + 
teardown: () => void
    /** Trigger a callback when the mock is called */
    subscribe: (callback: Subscriber) => void
  }

  // NOTE(review): generic parameters (e.g. `z.infer<…>`) appear to have been
  // stripped by the import/flattening of this artifact — confirm against the
  // original source before relying on the exact type text here.
  //
  // All mocks share the module-level `testContext`, `injections` and
  // `subscribers` — only one recorder may be active at a time.
  const mock: Mock = {
    // Pop the next injected/replayed message for `id` (FIFO) and notify
    // subscribers; returns undefined when the queue for `id` is empty.
    next: (id) => {
      log('next', id)
      const messages = injections.get(id) || []
      const raw = messages.shift()
      // only validate real messages; undefined passes through untouched
      const message = raw === undefined ? raw : messageSchema.parse(raw)
      // TODO allow callbacks to return an override payload
      subscribers.forEach((callback) => callback(message, id))
      return message as z.infer
    },
    // Queue a message for `id`; requires an active recorder context.
    inject: (id, payload) => {
      if (!testContext) {
        throw new Error('no test context to inject into')
      }
      const messages = injections.get(id) || []
      messages.push(payload)
      injections.set(id, messages)
    },
    // Activate recording/replay for test `t`. Loads any previously saved
    // recordings and pre-injects them so next() replays the snapshot.
    useRecorder: (t, update) => {
      if (testContext) {
        throw new Error('recorder already active')
      }
      testContext = t
      _enableDeterministicMockMode()

      // 'updateSnapshots' wipes this test's saved recordings first
      if (update) {
        blankRecordingsForTest(t)
      }
      const saved = readRecordings(t)

      for (const [id, recording] of Object.entries(saved)) {
        for (const message of recording) {
          mock.inject(id, message)
        }
      }
    },
    // Append a validated message to the snapshot file for `id`.
    // Silently a no-op when no recorder is active.
    store: (id, message) => {
      // TODO require a store key, so multiple functions can store with
      // different schemas but using the same id
      if (!testContext) {
        log('no recording context for id: %s', id)
        return
      }
      const parsed = messageSchema.parse(message)
      const saved = readRecordings(testContext)
      const messages = saved[id] || []
      messages.push(parsed)
      log('storing %s', id, messages)
      writeRecordingFile(id, testContext, messages)
    },
    // Reset all shared module state so the next test can use a recorder.
    teardown: () => {
      // TODO only write to file if teardown is called so no errors in snaps
      injections.clear()
      subscribers.clear()
      testContext = undefined
      _disableDeterministicMockMode()
    },
    subscribe: (callback) => {
      subscribers.add(callback)
    },
  }
  Object.freeze(mock)
  return mock
}

// Overwrite this test's recordings subtree with an empty object,
// preserving the rest of the snapshot file.
const blankRecordingsForTest = (t: TestContext) => {
  const filename = getFilename(t)
  const allRecords = readRecordFile(t)
  const path = getRecordingsPath(t)
log('blanking recordings for path', path) + + set(allRecords, path, {}) + + const pretty = JSON.stringify(allRecords, null, 2) + Deno.writeTextFileSync(filename, pretty) +} + +const readRecordings = (t: TestContext) => { + const allRecords = readRecordFile(t) + const path = getRecordingsPath(t) + + log('path', path) + const recordings = get(allRecords, path, {}) + log('recordings', recordings) + + const recordingsSchema = z.record(id, z.unknown().array()) + return recordingsSchema.parse(recordings) +} + +export const getFilename = (t: TestContext, withDirCreation?: boolean) => { + const origin = t.origin + const url = origin + '.json' + const path = fromFileUrl(url) + const parent = dirname(path) + const base = basename(path) + const snapPath = `${parent}/__snapshots__/${base}` + + if (withDirCreation) { + try { + Deno.mkdirSync(dirname(snapPath)) + } catch (_) { + log('error creating directory', snapPath) + } + } + + log('filename', snapPath) + return snapPath +} + +const writeRecordingFile = ( + id: string, + t: TestContext, + messages: unknown[], +) => { + const withDirCreation = true + const filename = getFilename(t, withDirCreation) + const path = getRecordsPathById(id, t) + log('writing to path', path) + + const records = readRecordFile(t) + set(records, path, messages) + + const pretty = JSON.stringify(records, null, 2) + Deno.writeTextFileSync(filename, pretty) +} + +const readRecordFile = (t: TestContext) => { + const baseTestRecord = z.object({ + recordings: z.record(id, z.unknown().array()).optional(), + }) + type TestRecord = z.infer & { + children?: Record + } + const testRecordSchema: z.ZodType = baseTestRecord.extend({ + children: z.lazy(() => z.record(testRecordSchema)).optional(), + }) + const fileSchema = z.record(testRecordSchema) + + const filename = getFilename(t) + let contents = fileSchema.parse({}) + try { + const data = Deno.readTextFileSync(filename) + contents = fileSchema.parse(JSON.parse(data)) + } catch (error) { + if (error instanceof 
Error) { + if (error instanceof ZodError) { + throw error + } + log('error reading recording file', error.message) + } + } + return contents +} + +export const removeSnapshotsFile = (t: TestContext) => { + const filename = getFilename(t) + try { + Deno.removeSync(filename) + } catch (_) { + log('error removing file') + } +} +const getRecordsPathById = (id: string, t: TestContext) => { + const path = getRecordingsPath(t) + return [...path, id] +} +const getRecordingsPath = (t: TestContext) => { + const path = [t.name, 'recordings'] + while (t.parent) { + path.unshift(t.parent.name, 'children') + t = t.parent + } + log('getRecordingsPath', path) + return path +} diff --git a/_import-artifact/isolates/youtube.test.ts b/_import-artifact/isolates/youtube.test.ts new file mode 100644 index 000000000000..448c56333f06 --- /dev/null +++ b/_import-artifact/isolates/youtube.test.ts @@ -0,0 +1,27 @@ +import Compartment from '@/io/compartment.ts' +import { Api, VideoDetails } from './youtube.ts' +import { expect } from '@utils' +import { createMockApi } from '@/tests/fixtures/mock-api.ts' +Deno.test('youtube', async () => { + const compartment = await Compartment.create('youtube') + const { api, stop } = await createMockApi('test/youtube') + const { fetch } = compartment.functions(api) + + const videoID = 'zIB7YsC34Tc' + const lang = 'en' + + const path = 'test/youtube/fetch.json' + const result = await fetch({ path, videoID, lang }) + expect(result.path).toEqual(path) + expect(result.success).toBeTruthy() + + type Result = { + details: VideoDetails + transcript: { start: string; text: string }[] + } + const data = await api.readJSON(path) + + expect(data.details).toBeDefined() + expect(data.transcript).toBeDefined() + stop() +}) diff --git a/_import-artifact/isolates/youtube.ts b/_import-artifact/isolates/youtube.ts new file mode 100644 index 000000000000..07bd6d1fb464 --- /dev/null +++ b/_import-artifact/isolates/youtube.ts @@ -0,0 +1,66 @@ +import { Functions, ToApiType } 
from '@/constants.ts' +import { + getSubtitles, + getVideoDetails, + type VideoDetails, +} from 'youtube-caption-extractor' +import { Debug } from '@utils' +import { z } from 'zod' + +const log = Debug('AI:youtube') + +export { type VideoDetails } + +export const parameters = { + fetch: z.object({ + path: z.string().regex(/.*\.json$/).describe( + 'Relative path to save the fetched transcription result. Must end in ".json" and must start with "./"', + ), + videoID: z + .string() + .describe( + 'The last part of a YouTube URL. For example, in the URL https://www.youtube.com/watch?v=zIB7YsC34Tc, the videoID is "zIB7YsC34Tc"', + ), + lang: z + .string() + .optional() + .describe('Language code for the subtitles - defaults to "en"'), + }).strict() + .describe( + 'Fetch video details and subtitles (subs). This will write the transcription json object to the given path, and return that path if everything was ok', + ), +} +export const returns = { + fetch: z.object({ + success: z.boolean().describe('Whether the fetch was successful'), + path: z.string().describe('The path to the saved transcription json file'), + }), +} + +export type Api = ToApiType + +export const functions: Functions = { + async fetch({ path, videoID, lang = 'en' }, api) { + const details = await fetchVideoDetails(videoID, lang) + const subs: VideoDetails['subtitles'] | string = await fetchSubtitles( + videoID, + lang, + ) + const transcript = subs.map(({ start, text }) => ({ start, text })) + api.writeJSON(path, { details, transcript }) + return { success: true, path } + }, +} + +const fetchSubtitles = async (videoID: string, lang = 'en') => { + const subtitles = await getSubtitles({ videoID, lang }) + log(subtitles) + return subtitles +} + +const fetchVideoDetails = async (videoID: string, lang = 'en') => { + const videoDetails = await getVideoDetails({ videoID, lang }) + const { subtitles, ...rest } = videoDetails + log('details had subtitles:', !!subtitles) + return rest +} diff --git 
a/_import-artifact/keys.ts b/_import-artifact/keys.ts new file mode 100644 index 000000000000..f96efe38102d --- /dev/null +++ b/_import-artifact/keys.ts @@ -0,0 +1,76 @@ +import { + isMergeReply, + isPierceRequest, + PID, + Poolable, + SolidRequest, +} from '@/constants.ts' +import { assert } from '@utils' +/** The current count of everything that has been atomically pooled */ +export const POOL_COUNTER = 'counter' +/** What value the pool has been processed up to */ +export const POOL_MARKER = 'marker' +export const getPoolKeyPrefix = (pid: PID) => { + const { repoId, account, repository, branches } = pid + return [repoId, account, repository, 'pool', ...branches] +} +export const getPoolMarkerKey = (pid: PID) => { + return [...getPoolKeyPrefix(pid), POOL_MARKER] +} +export const getPoolCounterKey = (pid: PID) => { + return [...getPoolKeyPrefix(pid), POOL_COUNTER] +} +export const getPoolKey = (poolable: Poolable) => { + const uniqueId = getId(poolable) + return [...getPoolKeyPrefix(poolable.target), uniqueId] +} +export const getRepoBase = (pid: PID) => { + const { repoId, account, repository } = pid + return [repoId, account, repository] +} +export const getExeId = (request: SolidRequest) => { + return idWithSequence(request.source, request.sequence) +} +const idWithSequence = (pid: PID, sequence: number) => { + return getPoolKeyPrefix(pid).join('/') + ':' + sequence +} +const getId = (poolable: Poolable) => { + if (!isMergeReply(poolable)) { + if (isPierceRequest(poolable)) { + return poolable.ulid + } + return idWithSequence(poolable.source, poolable.sequence) + } else { + return idWithSequence(poolable.target, poolable.sequence) + } +} + +export const getHeadKey = (pid: PID) => { + const prefix = getRepoBase(pid) + return [...prefix, 'refs', 'heads', ...pid.branches] +} +export const getEffectsLockKey = (pid: PID) => { + // TODO check if a branch is named with something.lock + const prefix = getRepoBase(pid) + const branches = [...pid.branches] + const last = 
branches.pop() + return [...prefix, 'refs', 'heads', ...branches, last + '.lock'] +} +export const getRepoLockKey = (pid: PID) => { + const prefix = getRepoBase(pid) + return [...prefix, 'index.lock'] +} +export const headKeyToPid = (headKey: string[]) => { + const [repoId, account, repository, refs, heads, ...branches] = headKey + assert(repoId, 'no id') + assert(refs === 'refs', 'not a refs path') + assert(heads === 'heads', 'not a heads path') + assert(branches.length > 0, 'no branches') + return { repoId, account, repository, branches } +} + +export const UNDELIVERED = ['_', 'system', 'system', 'undelivered'] + +export const HOME_ADDRESS = ['_', 'HOME_ADDRESS'] + +export const DB_LOCK = ['_', 'DB_LOCK'] diff --git a/_import-artifact/main.ts b/_import-artifact/main.ts new file mode 100644 index 000000000000..159a37bf2094 --- /dev/null +++ b/_import-artifact/main.ts @@ -0,0 +1,38 @@ +import { Debug, isKvTestMode } from '@/utils.ts' +import Server from '@/server/server.ts' +import { init } from '@/isolates/github.ts' +import { Engine } from '@/engine.ts' + +const getPrivateKey = () => { + const privateKey = Deno.env.get('SUPERUSER_PRIVATE_KEY') + if (!privateKey) { + throw new Error('SUPERUSER_PRIVATE_KEY not set') + } + return privateKey +} +const getAesKey = () => { + const aesKey = Deno.env.get('AES_KEY') + if (!aesKey) { + throw new Error('AES_KEY not set') + } + return aesKey +} + +Debug.enable( + 'AI:completions* AI:qbr AI:qex AI:server AI:engine AI:actors AI:hal AI:github AI:system', +) +const engine = await Engine.boot(getPrivateKey(), getAesKey()) +const server = Server.create(engine, init) + +const opts: { cert?: string; key?: string; hostname?: string; port?: number } = + {} +if (isKvTestMode()) { + opts.cert = Deno.readTextFileSync('tests/ssl/cert.pem') + opts.key = Deno.readTextFileSync('tests/ssl/key.pem') + opts.hostname = '127.0.0.1' + opts.port = 9000 + console.log('loading test ssl certs') +} + +Deno.serve(opts, server.fetch) + diff --git 
a/_import-artifact/napps/example-napp/instructions.md b/_import-artifact/napps/example-napp/instructions.md new file mode 100644 index 000000000000..4183df7d07f9 --- /dev/null +++ b/_import-artifact/napps/example-napp/instructions.md @@ -0,0 +1 @@ +These are the system prompt instructions that get given to the entry agent to run. \ No newline at end of file diff --git a/_import-artifact/napps/example-napp/napp.jsonc b/_import-artifact/napps/example-napp/napp.jsonc new file mode 100644 index 000000000000..6c24efc34ccc --- /dev/null +++ b/_import-artifact/napps/example-napp/napp.jsonc @@ -0,0 +1,129 @@ +{ + "name": "napp-name", + "version": "0.0.1", + "napp-format": "v1", + "description": "This is a description of the napp", + // what species of runtime needs to be used to execute this napp + "runtime": "deno", + // folder containing the branding assets of the napp + "branding": "path/to/branding/", + + "agent": { + // entry point that takes plain text in and returns text plus files + "napp": "napp-runner-name", + "tool": "some-exported-function", // if use a function other than main + "parameters": { + // parameters for the chat runner, based on what the runner is + "model": "gpt-4o", + "parallel_tool_calls": false, + "stop_on_tools": ["tool1", "tool3"], + // tools are reverse called back to the calling napp file + // if the tool list is not given, pass all the tools + // if the tool lists some things, use those + // can only be strings, else gets too complicated ? + // or allow a full json function definition here ? 
+ "tools": [] + // content is the reserved name for the input text + + // if instructions was not named instructions, then can pass it in here + // instructions.md is preferred, or README.md, else error + } + }, + "main": "some-function", + "tools": { + "some-function": { + "description": "This function does something", + "parameters": { + "param1": { + "description": "This is the first parameter", + "type": "string", + "required": true + }, + "param2": { + "description": "This is the second parameter", + "type": "number", + "required": false + }, + "permissions": [ + // list of permissions required to run this function + ], + "gateways": [ + // list of gateways required to run this function + ] + }, + "returns": { + "description": "This is what the function returns", + "type": "string" + }, + "throws": { + "FileNotFoundError": { + // list of expected error types and what to do about it + } + } + }, + // if the function name is the same, as what is in a napp, + "some-other-function": "dependent-napp-name", + "modified-passthru": { + // passes thru to a tool of the same name + "parameters": {}, + "napp": { + "name": "dependent-napp-name", + "tool": "some-exported-function" + } + } + }, + "help": [ + // list of files that can be read to learn how to interface with this napp + // intended to be read by an AI + ], + "test": { + "napp": "napp-test-runner-name", + "parameters": { + // config for the test runner, based on what the runner is + "files": [ + // list of test files that can be run to verify the napp is working correctly + ] + } + }, + "dependencies": { + // list of napps that this napp depends on, including version numbers and + // commits so that every reference to the napp can be by name. 
+ "dependent-napp-name": { + "version": "0.0.1", + "commit": "some-commit-hash" + } + }, + // list of the api gateway targets that this thing needs to do its job + "sideEffects": { + // default configurations for the production running of side effects + }, + "testSideEffects": { + // special services that can be used for testing - these are still live but + // are somehow test grade, as in might be cheaper, or no credentials + + }, + "mockSideEffects": { + // when running in the mock environment, these side effects are supplied + }, + "graphics": { + // a list of exported widgets that can be displayed on the stateboard + "googleMapsViewer": { + "type": "react", + "component": "./path/to/component", + "parameters": { + // mock react props that can be configured by the LLM + } + } + } +} + +// gateways can be just a fetch function with some urls +// can make a moneyworks connector, which could be a napp, running as a gateway +// host +// upstream dependencies ? +// tools being the same exported as what is in the agent +// need an agent config that is a generic way + +// gateways need to have backups and retry policies +// gateways need environments, like mock, test, prod + diff --git a/_import-artifact/server/cloud.ts b/_import-artifact/server/cloud.ts new file mode 100644 index 000000000000..d2c73461c0ff --- /dev/null +++ b/_import-artifact/server/cloud.ts @@ -0,0 +1,44 @@ +/** + * Tests the deployed instance in deno cloud. 
+ * Not part of regular testing since can only run after the code is deployed + */ +import { assert } from '@utils' +import { WebClientEngine } from '../api/client-engine.ts' +import guts from '../guts/guts.ts' +import '@std/dotenv/load' +import { Backchat } from '../api/client-backchat.ts' +import type { CradleMaker } from '@/constants.ts' + +let introDone = false + +const cradleMaker: CradleMaker = async () => { + const url = Deno.env.get('CLOUD_URL') + assert(url, 'CLOUD_URL not set') + const privateKey = Deno.env.get('CLOUD_MACHINE_KEY') + assert(privateKey, 'CLOUD_MACHINE_KEY not set') + + const engine = await WebClientEngine.start(url) + + const backchat = await Backchat.upsert(engine, privateKey) + if (!introDone) { + introDone = true + console.log('testing:', url) + const repos = await backchat.lsRepos() + if (repos.length) { + console.log('deleting repos:', repos) + await backchat.rm({ all: true }) + + const postRepos = await backchat.lsRepos() + console.log('postRepos:', postRepos) + } + } + return { + backchat, + engine, + privateKey, + [Symbol.asyncDispose]: () => Promise.resolve(engine.stop()), + } +} +guts(cradleMaker) + +// TODO make a test that spins up a real localhost server diff --git a/_import-artifact/server/db-drop.ts b/_import-artifact/server/db-drop.ts new file mode 100644 index 000000000000..6a4f4d8dc6a4 --- /dev/null +++ b/_import-artifact/server/db-drop.ts @@ -0,0 +1,18 @@ +import { load } from '@std/dotenv' +await load({ export: true }) + +const url = Deno.env.get('DENO_KV_URL') +const db = await Deno.openKv(url) + +if (!confirm(`WARNING: The database ${url} will be reset. 
Continue?`)) { + Deno.exit() +} + +const all = db.list({ prefix: [] }, { batchSize: 1000 }) + +const promises = [] +for await (const { key } of all) { + console.log('deleting: ', key) + promises.push(db.delete(key)) +} +await Promise.all(promises) diff --git a/_import-artifact/server/db-tools.ts b/_import-artifact/server/db-tools.ts new file mode 100644 index 000000000000..fb0366900413 --- /dev/null +++ b/_import-artifact/server/db-tools.ts @@ -0,0 +1,36 @@ +import { load } from '@std/dotenv' +import { DB_LOCK, HOME_ADDRESS, UNDELIVERED } from '@/keys.ts' +import { PID, print } from '@/constants.ts' +await load({ export: true }) + +console.log('CLOUD_URL:', Deno.env.get('CLOUD_URL')) +console.log('db', Deno.env.get('DENO_KV_URL')) + +const db = await Deno.openKv(Deno.env.get('DENO_KV_URL')) +const undelivered = db.list({ prefix: UNDELIVERED }) + +const homeAddress = await db.get(HOME_ADDRESS) +console.log('homeAddress', homeAddress.value) +if (homeAddress.value) { + console.log('homeAddress', print(homeAddress.value)) +} + +const dbLock = await db.get(DB_LOCK) +console.log('dbLock', dbLock.value) + +// console.log('deleting dblock') +// await db.delete(DB_LOCK) + +for await (const { key, value } of undelivered) { + console.log('undelivered: ', key, value) +} + +const start = Date.now() +await db.get(['void key']) +console.log('latency', Date.now() - start) + +// const all = db.list({ prefix: [] }) + +// for await (const { key } of all) { +// console.log('key: ', key) +// } diff --git a/_import-artifact/server/keygen.ts b/_import-artifact/server/keygen.ts new file mode 100644 index 000000000000..5d91f0c2db96 --- /dev/null +++ b/_import-artifact/server/keygen.ts @@ -0,0 +1,26 @@ +import * as secp from '@noble/secp256k1' +import { assert } from '@std/assert' +import { generateKey } from '@kitsonk/kv-toolbox/crypto' + +console.log('private key') +console.time('private key') +const privKey = secp.utils.randomPrivateKey() +const privateKey = secp.etc.bytesToHex(privKey) 
+console.log(privateKey) +console.timeEnd('private key') +console.time('private key check ok') +assert(secp.utils.isValidPrivateKey(privateKey), 'invalid private key') +console.timeEnd('private key check ok') +console.log('') + +console.time('public key') +const publicKey = secp.etc.bytesToHex(secp.getPublicKey(privateKey)) +console.log('public key') +console.log(publicKey) +console.timeEnd('public key') +console.log('') + +console.log('AES encryption key') +console.time('AES encryption key') +console.log(generateKey()) +console.timeEnd('AES encryption key') diff --git a/_import-artifact/server/server.test.ts b/_import-artifact/server/server.test.ts new file mode 100644 index 000000000000..097a9b16fde5 --- /dev/null +++ b/_import-artifact/server/server.test.ts @@ -0,0 +1,55 @@ +import { type CradleMaker, Outcome } from '@/constants.ts' +import Server from './server.ts' +import { assert, delay, expect } from '@utils' +import { WebClientEngine } from '../api/client-engine.ts' +import guts from '../guts/guts.ts' +import { Backchat } from '../api/client-backchat.ts' +import { cradleMaker } from '@/cradle-maker.ts' +import { Engine } from '@/engine.ts' + +const webCradleMaker: CradleMaker = async (t, url, update, init) => { + const { engine: core, privateKey, backchat: { id: backchatId } } = + await cradleMaker(t, url, update, init) + assert(core instanceof Engine, 'not an engine') + + const server = Server.create(core) + const fetcher = server.request as typeof fetch + + const engine = await WebClientEngine.start('https://mock', fetcher) + const backchat = await Backchat.upsert(engine, privateKey, backchatId) + + // think the watcher is causing resource leaks + await delay(10) + + const clientStop = engine.stop.bind(engine) + engine.stop = async () => { + // must stop the client first, else will retry + clientStop() + await server.stop() + // oddly needs another loop to avoid resource leaks in fast tests + await delay(10) + } + return { + backchat, + engine, + 
privateKey, + [Symbol.asyncDispose]: () => Promise.resolve(engine.stop()), + } +} +Deno.test('hono basic', async (t) => { + await t.step('ping', async () => { + await using cradle = await cradleMaker(t, import.meta.url) + const { engine } = cradle + assert(engine instanceof Engine, 'not an engine') + const server = Server.create(engine) + const payload = { data: { ping: 'test', extra: 'line' } } + const res = await server.request('/api/ping', { + method: 'POST', + body: JSON.stringify(payload), + }) + const reply: Outcome = await res.json() + expect(reply.result).toEqual(payload) + }) +}) + +guts(webCradleMaker) diff --git a/_import-artifact/server/server.ts b/_import-artifact/server/server.ts new file mode 100644 index 000000000000..0ce90c387f05 --- /dev/null +++ b/_import-artifact/server/server.ts @@ -0,0 +1,272 @@ +import { createGitHubOAuthConfig, createHelpers } from '@deno/kv-oauth' +import { type Context, Hono } from 'hono' +// TODO try out the fast router to improve load times +import { cors } from 'hono/cors' +import { endTime, setMetric, startTime, timing } from 'hono/timing' +import { logger } from 'hono/logger' +import { poweredBy } from 'hono/powered-by' +import { prettyJSON } from 'hono/pretty-json' +import { SSEStreamingApi, streamSSE } from 'hono/streaming' +import { Engine } from '../engine.ts' +import { assert, Debug, delay, serializeError } from '@/utils.ts' +import { EventSourceMessage, machineIdRegex, Provisioner } from '@/constants.ts' +import '@std/dotenv/load' +type Seed = Deno.KvEntry[] + +const log = Debug('AI:server') + +let sseId = 0 +export default class Server { + #engine: Engine + #app: Hono + #provisioning: Promise | undefined + private constructor(engine: Engine, app: Hono) { + this.#engine = engine + this.#app = app + } + static create(engine: Engine, init?: Provisioner) { + const base = new Hono() + const server = new Server(engine, base) + + base.use(timing()) + base.use(prettyJSON()) + base.use('*', logger(), poweredBy(), 
cors()) + + base.use(async (_, next) => { + if (!server.#provisioning) { + // must be as part of a request, else deno deploy times out + server.#provisioning = engine.ensureHomeAddress(init) + } + await server.#provisioning + await next() + }) + + const app = base.basePath('/api') + app.post(`/ping`, async (c) => { + const params = await c.req.json() + return execute(c, engine.ping(params), 'ping') + }) + app.post(`/homeAddress`, (c) => { + return execute(c, Promise.resolve(engine.homeAddress), 'homeAddress') + }) + app.post(`/upsertBackchat`, async (c) => { + const { machineId, resume } = await c.req.json() as { + machineId: string + resume?: string + } + return execute( + c, + engine.upsertBackchat(machineId, resume), + 'upsertBackchat', + ) + }) + app.post(`/pierce`, async (c) => { + // TODO hook GitKV for write count, read count, and size + const params = await c.req.json() + return execute(c, engine.pierce(params), 'pierce') + }) + app.post(`/apiSchema`, async (c) => { + const params = await c.req.json() + return execute(c, engine.apiSchema(params.isolate), 'apiSchema') + }) + app.post('/watch', (c) => { + return streamSSE(c, async (stream) => { + const params = await c.req.json() + const abort = new AbortController() + stream.onAbort(() => abort.abort('stream aborted')) + engine.abortSignal.addEventListener('abort', () => abort.abort()) + const { pid, path, after } = params + keepalive(stream, abort.signal) + try { + const iterable = engine.watch(pid, path, after, abort.signal) + for await (const splice of iterable) { + const event: EventSourceMessage = { + data: JSON.stringify(splice, null, 2), + event: 'splice', + id: String(sseId++), + } + await stream.writeSSE(event) + } + log('stream end') + } catch (error) { + console.error('server stream error', error) + // if an error occurred, stall the stream to slow the clients + if (abort.signal.aborted) { + return + } + log('stalling stream indefinitely') + await delay(3600000, abort) + } + }) + }) + 
app.post(`/read`, async (c) => { + const params = await c.req.json() + const { path, pid, commit } = params + return execute(c, engine.read(path, pid, commit), 'read') + }) + app.post(`/readTree`, async (c) => { + const params = await c.req.json() + const { path, pid, commit } = params + return execute(c, engine.readTree(path, pid, commit), 'readTree') + }) + app.post(`/readJSON`, async (c) => { + const params = await c.req.json() + const { path, pid, commit } = params + return execute(c, engine.readJSON(path, pid, commit), 'readJSON') + }) + app.post(`/readBinary`, async (c) => { + const params = await c.req.json() + const { path, pid, commit } = params + return execute(c, engine.readBinary(path, pid, commit), 'readBinary') + }) + app.post(`/splice`, async (c) => { + // TODO add a zod schema for format + const { target, opts } = await c.req.json() + return execute(c, engine.splice(target, opts), 'splice') + }) + app.post(`/exists`, async (c) => { + const params = await c.req.json() + const { path, pid } = params + return execute(c, engine.exists(path, pid), 'exists') + }) + app.post('/transcribe', async (c) => { + const body = await c.req.parseBody() + const audio = body['audio'] as File + assert(audio, 'audio is required') + return execute(c, engine.transcribe(audio), 'transcribe') + }) + + const hooks = base.basePath('/hooks') + hooks.post('/github', async (c) => { + const body = await c.req.json() + console.log('github hook', body) + if (body?.repository?.full_name !== 'dreamcatcher-tech/HAL') { + return c.json({ error: 'invalid repository' }, 400) + } + const su = engine.superUser() + const target = { ...su.pid, branches: ['main'] } + const promise = su.pull({ repo: 'dreamcatcher-tech/HAL', target }) + .then(() => console.log('done pull')) + .catch((error) => console.error('pull error', error)) + console.log('starting pull') + + return execute(c, promise, 'hooks/github') + }) + + if (Deno.env.get('GITHUB_CLIENT_ID')) { + const { + signIn, + handleCallback, + 
signOut, + } = createHelpers(createGitHubOAuthConfig()) + const auth = base.basePath('/auth') + auth.get('/signin', async (c) => { + const { machineId } = c.req.query() + if (!machineIdRegex.test(machineId)) { + // TODO check key is valid using signatures + throw new Error('machineId querystring is required') + } + + const response = await signIn(c.req.raw) + const cookie = response.headers.get('set-cookie') + console.log('cookie', cookie) + // acting as the github actor, pierce the github chain to store this info + + return response + // c.header('set-cookie', response.headers.get('set-cookie')!) + // return c.redirect(response.headers.get('location')!, response.status) + }) + + auth.get('/callback', async (c) => { + const { response, tokens, sessionId } = await handleCallback(c.req.raw) + console.log('tokens', tokens, sessionId) // lol + // acting as the github actor, pierce the github chain to store this info + // as well as storing the token from github + // there should be one PAT per machine id + + // get the userId from github + // move the machine branch to be inside the user branch + // send the new pid down to the browser + + // make a fetch request to get the userId from github + + // pass back an id so the browser knows which pats it has + + return response + // c.header('set-cookie', response.headers.get('set-cookie')!) + // return c.redirect(response.headers.get('location')!, response.status) + }) + + auth.get('/signout', async (c) => { + const response = await signOut(c.req.raw) + return response + // c.header('set-cookie', response.headers.get('set-cookie')!) 
+ // return c.redirect(response.headers.get('location')!, response.status) + }) + } + + // TODO set a cookie for the machineId so it doesn't have to prove again + // or get a sig on all pierce actions, and only allow correctly signed ones + // to enter + + return server + } + dump() { + return this.#engine.dump() + } + async stop() { + // TODO figure out why this stops resource leaks + await this.#provisioning + // TODO add all the read streams to be stopped too ? + await this.#engine.stop() + } + get request() { + return this.#app.request + } + get fetch() { + return this.#app.fetch + } +} + +const execute = async (c: Context, p: Promise, name: string) => { + startTime(c, name) + const denoVersion = navigator.userAgent.toLowerCase() + setMetric(c, 'deno', 'Deno Version: ' + denoVersion) + const region = Deno.env.get('DENO_REGION') || '(unknown)' + setMetric(c, 'region', 'Region: ' + region) + const deployment = Deno.env.get('DENO_DEPLOYMENT_ID') || '(unknown)' + setMetric(c, 'deployment', 'Deployment: ' + deployment) + + log('execute', name, c.req.url) + + try { + const result = await p + endTime(c, name) + if (result instanceof Uint8Array) { + return c.body(result) + // TODO handle an error in readBinary + } + return c.json({ result }) + } catch (error) { + endTime(c, name) + return c.json({ error: serializeError(error) }) + } +} +const keepalive = async (stream: SSEStreamingApi, signal: AbortSignal) => { + while (!signal.aborted) { + try { + // persistent attempts to stop the event loop from exiting while waiting + await delay(30000, { signal, persistent: true }) + } catch (_) { + return + } + if (!signal.aborted) { + const event: EventSourceMessage = { + data: '', + event: 'keepalive', + id: String(sseId++), + } + await stream.writeSSE(event) + } + } +} diff --git a/_import-artifact/tests/README.md b/_import-artifact/tests/README.md new file mode 100644 index 000000000000..19b1702e1b20 --- /dev/null +++ b/_import-artifact/tests/README.md @@ -0,0 +1,33 @@ +# 
Extra tests + +do 10k customer records then sync it down from a server + +generate a task list of things to do reprioritize it provide all the features of +the todolist in redux + +push a backup to github + +load the sysprompt edit the sysprompt from within hal do an action that triggers +some conventional code then convert the api calls to openai to do this +conventional way + +implement the stuckloop + +do some distributed computing where multiple machines reach consensus + +pull in npm packages using SSRI and ipfs + +push stucks to a remote repo + +sort customers + +display something in the stateboard + +edit portions of a text based on chat, then turn that in on itself as sysprompt +or knowledge + +do a vector search on goals + +show background processes that are happening after an initial trigger + +ping google diff --git a/_import-artifact/tests/auto-update.test.ts b/_import-artifact/tests/auto-update.test.ts new file mode 100644 index 000000000000..278f0b8a0b61 --- /dev/null +++ b/_import-artifact/tests/auto-update.test.ts @@ -0,0 +1,25 @@ +import { cradleMaker } from '@/cradle-maker.ts' +import { expect } from '@std/expect/expect' +import { getRoot } from '@/constants.ts' +import { Backchat } from '@/api/client-backchat.ts' + +Deno.test('auto update on backchat', async (t) => { + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + + await using cradle = await cradleMaker(t, import.meta.url) + const { backchat, engine, privateKey } = cradle + const path = 'new-file.md' + + expect(await backchat.exists(path)).toBeFalsy() + + const main = getRoot(backchat.pid) + await backchat.write(path, 'hello world', main) + + expect(await backchat.exists(path)).toBeFalsy() + + const next = await Backchat.upsert(engine, privateKey) + + expect(await next.exists(path)).toBeTruthy() +}) diff --git a/_import-artifact/tests/calculator.test.ts b/_import-artifact/tests/calculator.test.ts new 
file mode 100644 index 000000000000..1f3d0cd29c14 --- /dev/null +++ b/_import-artifact/tests/calculator.test.ts @@ -0,0 +1,6 @@ +// import { debug, goal } from '../tst-helpers.js' + +// should be able to do some math and get the answer right each time by using an +// AI that is tuned to do calculations effectively. More complex requests +// should trigger the stuckloop +// goal.todo('whats 100 / 3 ?') diff --git a/_import-artifact/tests/crm/customers.test.ts b/_import-artifact/tests/crm/customers.test.ts new file mode 100644 index 000000000000..ece44444d6dc --- /dev/null +++ b/_import-artifact/tests/crm/customers.test.ts @@ -0,0 +1,21 @@ +// expect it to be not sure what to do +// then we want to say "in my customers list, show me the first one" +// it should be confused and say we dont' have any customers and suggest we +// initialize a CRM. +// goal.todo('show me the first customer', async ({ result }) => { +// // this test function show the result of the goalie system +// debug(result) +// }) +// we should provision the crm to disk first, then we can add a customer +// also check adding a new customer once we have some customers already. 
+// goal.todo('add a new customer') +// goal.todo('show me the list of customers') +// goal.todo('who changed this customer last ?') +// goal.todo('add a note to this customer') +// goal.todo('add a note and make sure it stays private') +// goal.todo('what is this customers balance ?') +// goal.todo('is this customer synced with moneyworks ?') +// goal.todo('when did this customer last sync with moneyworks ?') +// goal.todo('what is this customers last invoice number ?') +// goal.todo('save this change to moneyworks') +// goal.todo('the template has changed - please update all the records') diff --git a/_import-artifact/tests/crm/initialize.test.ts b/_import-artifact/tests/crm/initialize.test.ts new file mode 100644 index 000000000000..93cc1d6ba973 --- /dev/null +++ b/_import-artifact/tests/crm/initialize.test.ts @@ -0,0 +1,2 @@ +// test.skip('start a new crm') +// test.skip('connect this up to moneyworks') diff --git a/_import-artifact/tests/crm/moneyworks.test.ts b/_import-artifact/tests/crm/moneyworks.test.ts new file mode 100644 index 000000000000..bab69bdd84eb --- /dev/null +++ b/_import-artifact/tests/crm/moneyworks.test.ts @@ -0,0 +1,2 @@ +// test.skip('how many customers are in moneyworks ?') +// test.skip('when was the last sync with moneyworks ?') diff --git a/_import-artifact/tests/crm/scheduling.test.ts b/_import-artifact/tests/crm/scheduling.test.ts new file mode 100644 index 000000000000..48bfb7fbb603 --- /dev/null +++ b/_import-artifact/tests/crm/scheduling.test.ts @@ -0,0 +1,2 @@ +// test.skip('which sectors are due to run today ?') +// test.skip('what is the manifest for the first sector tomorrow ?') diff --git a/_import-artifact/tests/crm/sectors.test.ts b/_import-artifact/tests/crm/sectors.test.ts new file mode 100644 index 000000000000..e2ff2bb36f15 --- /dev/null +++ b/_import-artifact/tests/crm/sectors.test.ts @@ -0,0 +1,3 @@ +// test.skip('how many sectors are there ?') +// test.skip('add a new sector') +// test.skip('change the name of sector 
A to be "bobs sector"') diff --git a/_import-artifact/tests/crm/sectors.ts b/_import-artifact/tests/crm/sectors.ts new file mode 100644 index 000000000000..f9332f61d702 --- /dev/null +++ b/_import-artifact/tests/crm/sectors.ts @@ -0,0 +1,2489 @@ +export default { + list: [ + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.25263667062976, -37.7440816347928], + [175.25447308995356, -37.74285429309597], + [175.2482092380524, -37.74007573162464], + [175.2468037605286, -37.73792916845374], + [175.2453339099884, -37.73694495267064], + [175.2380704879761, -37.73646980933366], + [175.23473381996155, -37.73175214933702], + [175.22886514663696, -37.724955135893936], + [175.2258932590485, -37.7217133941832], + [175.20791172981265, -37.7265335242151], + [175.21149516105655, -37.73596072380298], + [175.22545337677002, -37.74551680793711], + [175.23420095421903, -37.74272279101128], + [175.2365863331943, -37.74666492475831], + [175.23770570755008, -37.74760093824437], + [175.23989439010623, -37.74803359260726], + [175.2410852909088, -37.7475076196756], + [175.24511933326724, -37.746447179011966], + [175.25025486951563, -37.74492435964226], + [175.25263667062976, -37.7440816347928], + ], + ], + }, + properties: {}, + }, + name: 'Monday A Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.2795374393463, -37.77827922826108], + [175.28108596714452, -37.7802945985299], + [175.28319954915787, -37.78422625104449], + [175.27784228324893, -37.78722501921783], + [175.27147293003512, -37.78102385005064], + [175.26624441146853, -37.779203535490204], + [175.26553451993098, -37.77569986952771], + [175.2651107315615, -37.77544122226634], + [175.26472091674808, -37.775020034578], + [175.26410937309265, -37.77323349424174], + [175.2626878023148, -37.76947229034207], + [175.2626019716263, -37.769277228326324], + 
[175.26144325733188, -37.767920260933316], + [175.2605849504471, -37.767475000581804], + [175.258042216301, -37.76679650346249], + [175.25594472885135, -37.7661010374548], + [175.25428175926208, -37.76550310108544], + [175.25250077247622, -37.76422239841151], + [175.25032818317416, -37.76194082678353], + [175.24857938289645, -37.76017234831363], + [175.24605274200442, -37.757767658679306], + [175.2405846110196, -37.75217620606382], + [175.2422976487287, -37.75138728891317], + [175.24652481079104, -37.75340480610375], + [175.2496594192781, -37.751609261362326], + [175.25269031513747, -37.75328463351467], + [175.2539545297623, -37.752711335284936], + [175.254412293325, -37.75225255212947], + [175.25520890940246, -37.75219529226402], + [175.25548875331882, -37.75231617400641], + [175.25599747885283, -37.75239958910072], + [175.25798320770267, -37.75168136652006], + [175.25943696498874, -37.747077082598416], + [175.2596300840378, -37.74683318117238], + [175.26158273220065, -37.747745156301185], + [175.2642005681992, -37.749501206986864], + [175.26546657085422, -37.75137597839186], + [175.26611566543582, -37.75312346009644], + [175.26627659797668, -37.75533744318036], + [175.2645814418793, -37.75702969592087], + [175.2626556158066, -37.75888731225511], + [175.2624571323395, -37.76061341164975], + [175.26419520378113, -37.762403083969474], + [175.2658742666245, -37.76469736357979], + [175.2667808532715, -37.7673223392727], + [175.2681809663773, -37.7701804459439], + [175.27032136917117, -37.77226246894045], + [175.27294456958774, -37.77416634496604], + [175.27697324752808, -37.77643481920475], + [175.2773916721344, -37.776629862338226], + [175.2795374393463, -37.77827922826108], + ], + ], + }, + properties: {}, + }, + name: 'Monday B Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.27770638509543, -37.787247630329844], + [175.27376532554626, 
-37.78846504933552], + [175.27036696672442, -37.7890988344712], + [175.2693021297455, -37.789340476870834], + [175.26810318231585, -37.7899466990621], + [175.26695072629084, -37.7903338872366], + [175.2664911746979, -37.790374866983534], + [175.264887213707, -37.790105672589846], + [175.26420861482623, -37.78978136623045], + [175.2617034316063, -37.78798317120084], + [175.25967478708483, -37.78899426400356], + [175.2561736111238, -37.79055715554362], + [175.25348067327286, -37.790154424403156], + [175.25282263755798, -37.79019540424962], + [175.24979174137118, -37.790449760873706], + [175.24981856346133, -37.79024344937055], + [175.24882793448344, -37.789969308823856], + [175.2486848828994, -37.78821139240281], + [175.2472168207169, -37.786608882512205], + [175.24758875359112, -37.78546279834487], + [175.249187350164, -37.78422059803602], + [175.24983108065499, -37.782527554039476], + [175.24984717456394, -37.78124855940674], + [175.2460026745394, -37.775082223317185], + [175.24171829223636, -37.76952741647065], + [175.23877859115603, -37.76866235556156], + [175.23561716254335, -37.76416726832835], + [175.2459597605048, -37.757964164417615], + [175.24985074996948, -37.76180935861585], + [175.25388479232788, -37.76543949054887], + [175.25465190410617, -37.76569817305639], + [175.2561593055725, -37.76625370128503], + [175.2579510211945, -37.76684315033872], + [175.25925457477575, -37.76718664006672], + [175.2605098485947, -37.76754709053489], + [175.26122868061069, -37.76789905812022], + [175.26174902915957, -37.76837824021224], + [175.26256978511813, -37.76935779748216], + [175.26259124279025, -37.76945956892195], + [175.26415944055773, -37.773649038290834], + [175.2646350860596, -37.77510766422054], + [175.265246629715, -37.77564474825972], + [175.26611924215103, -37.7792939866079], + [175.27141928716452, -37.7811058193298], + [175.27770638509543, -37.787247630329844], + ], + ], + }, + properties: {}, + }, + name: 'Monday C Run', + frequencyInDays: 7, + 
frequencyOffset: 0, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.24481177373673, -37.77532249851146], + [175.24293422611666, -37.77382995452557], + [175.242124199649, -37.77397412197067], + [175.2418398854934, -37.77309073868807], + [175.2409172062471, -37.77220451814591], + [175.2400016784668, -37.771938790833595], + [175.23942232131958, -37.771869532397346], + [175.23962438117454, -37.77106386399728], + [175.2395009985776, -37.770680815274225], + [175.2388787273958, -37.769602331227496], + [175.23834586165324, -37.769465222541626], + [175.2373650673326, -37.769249665173916], + [175.23424118746678, -37.76870758771211], + [175.23310482507443, -37.76850616301626], + [175.23289203687455, -37.76855775618023], + [175.23261129943424, -37.768693452870245], + [175.23198902694276, -37.76934366263516], + [175.23162782257717, -37.769181110535804], + [175.23098409175878, -37.769589609829744], + [175.22679984602294, -37.76829201602137], + [175.21790742830493, -37.771910522123626], + [175.21741390250102, -37.77425680328222], + [175.21805405660416, -37.77770259089175], + [175.2200460430686, -37.77802200326526], + [175.221370160798, -37.77779092371868], + [175.22187441592908, -37.777243255333005], + [175.22149354224896, -37.77658110160309], + [175.22187799215317, -37.7762397755567], + [175.22248864162978, -37.77637192470813], + [175.22422313668358, -37.776108332807986], + [175.224840044757, -37.77627016278243], + [175.2247846122191, -37.77656343431709], + [175.22518515586856, -37.77682914500833], + [175.22725939728844, -37.77700157337928], + [175.22769570350647, -37.777417096767316], + [175.22955536886008, -37.777609310293336], + [175.22987902164462, -37.77794921664444], + [175.2265512941085, -37.78011087171236], + [175.2266907696321, -37.7819990000329], + [175.22873193036506, -37.78295788479761], + [175.22875249385837, -37.78311546008684], + [175.22886067607035, -37.783345109590186], + 
[175.22856205669086, -37.78367297741706], + [175.22997736952678, -37.78446861567742], + [175.22990584340732, -37.784710979774324], + [175.23012131430735, -37.78483534141277], + [175.2306309343476, -37.78497878058263], + [175.23188352628495, -37.78382489827238], + [175.23319780821112, -37.7830341993503], + [175.23350000381473, -37.78331896508109], + [175.2337646486558, -37.78317340245812], + [175.23348838079986, -37.78284977237975], + [175.23505389701316, -37.781903605364256], + [175.23566544066855, -37.781860501167785], + [175.23557245753182, -37.781593395517746], + [175.23628056005697, -37.7811453910125], + [175.23613393306735, -37.780915028081466], + [175.23775130510333, -37.7799243175108], + [175.23875176906589, -37.779343452549824], + [175.23956179618835, -37.77886010158469], + [175.24102896451956, -37.77797394994008], + [175.24187386035922, -37.77747150995854], + [175.24304866790774, -37.77675918413214], + [175.24374336004257, -37.77635425776032], + [175.2453446388245, -37.77538963364644], + [175.24481177373673, -37.77532249851146], + ], + ], + }, + properties: {}, + }, + name: 'Monday D Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'orange', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.25616466999057, -37.790600960909615], + [175.25340378306288, -37.790218013676515], + [175.2529013154708, -37.79024062309651], + [175.24970948739795, -37.79053171995758], + [175.24972200393677, -37.790263232509574], + [175.24873852773453, -37.78998343998613], + [175.2486079933442, -37.78822693681807], + [175.24713456674365, -37.78664562470766], + [175.24747788884272, -37.78546562454289], + [175.24912118911743, -37.78418385424676], + [175.24971127510074, -37.78251200829601], + [175.249217748642, -37.78223183268178], + [175.24888783693316, -37.78201136581015], + [175.24880737066275, -37.782094040964076], + [175.24853244423866, -37.781945649596196], + [175.248232036829, -37.78222335319888], + [175.2479276061058, 
-37.782025145008895], + [175.24753734469417, -37.78242792045256], + [175.2479222416878, -37.78268548359888], + [175.24721413850787, -37.78345710773486], + [175.24661600589755, -37.78411637362315], + [175.24670317769053, -37.78417148883297], + [175.2463893592358, -37.78446190291357], + [175.2461721003056, -37.784386649739446], + [175.2453111112118, -37.78525046677759], + [175.24432070553303, -37.78578888995847], + [175.24396397173408, -37.785421639139344], + [175.2438707649708, -37.78549742120384], + [175.2440437674523, -37.78570515905209], + [175.24383522570136, -37.785828105666816], + [175.24393513798717, -37.78595794130945], + [175.24297758936885, -37.786433826735056], + [175.24314992129806, -37.78666593900452], + [175.24216286838057, -37.78699873797277], + [175.24135619401935, -37.787273243000165], + [175.24078622460365, -37.78746772760515], + [175.2404395490885, -37.78716460696259], + [175.24032488465315, -37.786887451860366], + [175.23975759744647, -37.787042192504494], + [175.23990109562877, -37.7874147345755], + [175.23966975510123, -37.7874883948764], + [175.23990042507654, -37.787980168241326], + [175.23903608322146, -37.7881937282847], + [175.23911587893966, -37.788307662050414], + [175.2389093488455, -37.78836860329485], + [175.23847214877605, -37.788400398706784], + [175.23831792175773, -37.78792346609138], + [175.23744285106662, -37.78822128437438], + [175.2373918890953, -37.78866112056856], + [175.23674279451373, -37.7887686945646], + [175.23652218282226, -37.78828805485674], + [175.23617953062057, -37.78839509947242], + [175.23514620959762, -37.788167232034944], + [175.23507177829745, -37.788256259396476], + [175.2339164167643, -37.78763041596874], + [175.2330815792084, -37.78717573553963], + [175.2321917563677, -37.78669667527778], + [175.2315466850996, -37.786346386992456], + [175.23092709481716, -37.786006165918614], + [175.23051202297214, -37.78578783007417], + [175.22946953817154, -37.78655376924945], + [175.21900534629825, -37.79326599533487], 
+ [175.22902607917788, -37.796852193826126], + [175.22978246212008, -37.79705566086226], + [175.23280262947083, -37.797170110823814], + [175.2355840802193, -37.79730151611656], + [175.23734360933307, -37.79733966599649], + [175.24028062820437, -37.79661481490901], + [175.2441617846489, -37.795631379840934], + [175.24780422449115, -37.79474119025836], + [175.24819135709555, -37.79422261386735], + [175.25616466999057, -37.790600960909615], + ], + ], + }, + properties: {}, + }, + name: 'Monday E Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'black', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.2305044234345, -37.785757799969616], + [175.22788524649516, -37.787497757144244], + [175.22180914881577, -37.78568466801601], + [175.2189052107133, -37.78447780146168], + [175.21719574906462, -37.781159523862094], + [175.21813631057742, -37.77785240376354], + [175.21990925073626, -37.77806581632036], + [175.2214068175454, -37.777812830317956], + [175.22192448388526, -37.77725738918745], + [175.22154003381732, -37.77659028836759], + [175.22190302604943, -37.77627016291183], + [175.22244930267337, -37.77641220556109], + [175.2242365478924, -37.77613730668102], + [175.22480070580065, -37.77628712311311], + [175.22473901532067, -37.77656555486781], + [175.22518336751094, -37.77687013224383], + [175.2272441983769, -37.77702701375155], + [175.22768229246145, -37.77744606986864], + [175.2295365930695, -37.77764181708779], + [175.22983342409137, -37.777936496812174], + [175.22647261619568, -37.78007129999339], + [175.22658705700454, -37.78200323952856], + [175.22865593433383, -37.782981203141524], + [175.22881865490493, -37.78333239055704], + [175.22852003585288, -37.783688522786946], + [175.22995412349704, -37.784479921267824], + [175.22986739895717, -37.78472369857022], + [175.23009628057486, -37.78485653911028], + [175.2306362984382, -37.785018350062415], + [175.23188531398776, -37.783901211929845], + 
[175.23319691419607, -37.7830879022227], + [175.23348122835162, -37.78336065515841], + [175.2338129284181, -37.783174109029524], + [175.23352771991998, -37.78285966493961], + [175.2350655197006, -37.7819339902618], + [175.2356958388191, -37.78189583242732], + [175.23560732608533, -37.78159904859776], + [175.23634850978854, -37.78115104425617], + [175.23625016190636, -37.780926334086054], + [175.24134099461665, -37.77789197692915], + [175.24335175752643, -37.77661572888138], + [175.24539649497459, -37.775383273445684], + [175.24595350027084, -37.77508293009533], + [175.24977743581985, -37.78122877359566], + [175.24973094507007, -37.78245123896366], + [175.24888515472415, -37.781943883203006], + [175.24878680749683, -37.78203150463822], + [175.2485078571044, -37.78189865902051], + [175.2482217552461, -37.782154457364236], + [175.247880220195, -37.78193540342829], + [175.24741172746874, -37.78240601586791], + [175.2478694920137, -37.78269431702838], + [175.24653196291186, -37.78409058286228], + [175.24662494637596, -37.7841810290348], + [175.24638175920703, -37.78440714191172], + [175.24608850457304, -37.78432234953456], + [175.24526596025683, -37.78517026892852], + [175.24426102638247, -37.785715757906246], + [175.24400353431705, -37.785348330247764], + [175.24380326314713, -37.785483996121165], + [175.24395346685196, -37.78569879973887], + [175.24373173800998, -37.78580620157236], + [175.24383902637058, -37.785936213728], + [175.24290204091818, -37.786388429085214], + [175.24307370229508, -37.78664279881353], + [175.2408063411713, -37.787405902745206], + [175.24048447608948, -37.787100662118135], + [175.2402842049196, -37.78673889364387], + [175.23964762687683, -37.78699891496226], + [175.23984074592593, -37.78742286052085], + [175.23957610173966, -37.78746808106438], + [175.2398550514772, -37.78794289742958], + [175.23898243904114, -37.788135083584145], + [175.23902535438538, -37.78826509216015], + [175.23851037025455, -37.788304659492226], + [175.23833155675675, 
-37.78783549818387], + [175.23738741918356, -37.7881576933827], + [175.23731589339152, -37.78859858979939], + [175.23677945158855, -37.78867207296976], + [175.2365577220917, -37.78818030420934], + [175.2361643315453, -37.788310311670784], + [175.23514151573184, -37.788078558539986], + [175.23507714271548, -37.78811812597194], + [175.23266673098988, -37.78690847275582], + [175.23266673098988, -37.78690847275582], + [175.2305044234345, -37.785757799969616], + ], + ], + }, + properties: {}, + }, + name: 'Monday F Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'violet', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.25492191402012, -37.74303387048904], + [175.25274753832494, -37.7441311237709], + [175.2404737485631, -37.747920480119156], + [175.23985862557313, -37.7482824380227], + [175.23741245095152, -37.74768859934053], + [175.23407220753145, -37.742999934571884], + [175.22546053020054, -37.745731725079445], + [175.21165609359744, -37.736269004137654], + [175.21245002746585, -37.7379715912537], + [175.2170348171785, -37.75160784761623], + [175.20953178405765, -37.76250486498014], + [175.20551204594088, -37.76734495543773], + [175.20706415176394, -37.77061862231529], + [175.21751403808597, -37.77182288920935], + [175.22668361538314, -37.7679725618698], + [175.23091793022104, -37.76941999150154], + [175.23153304928206, -37.768967672332074], + [175.23199081382703, -37.76912598492551], + [175.23279905308303, -37.76839944288794], + [175.23750007130732, -37.76921220762634], + [175.23890912554634, -37.76955144562335], + [175.239602923175, -37.770629930412234], + [175.23969948347712, -37.77107941214829], + [175.23949205887678, -37.7718341960869], + [175.23998200980714, -37.771897801122975], + [175.24099767251758, -37.77218331656124], + [175.24188816591052, -37.77307377788292], + [175.24215996352726, -37.773927479851515], + [175.24294674396518, -37.773786138960084], + [175.24425923867966, 
-37.774832054226735], + [175.24483144305128, -37.775275856725365], + [175.2453392750613, -37.7753465257809], + [175.2458900211059, -37.77505254251101], + [175.24376213485087, -37.7722850841122], + [175.2416700124741, -37.76955992681862], + [175.2387160058425, -37.76869203919179], + [175.23607671151694, -37.76493201777832], + [175.23546516917128, -37.76414747820461], + [175.23849070159486, -37.76235219341162], + [175.24248898072983, -37.75994898822708], + [175.24600982666018, -37.75782420741859], + [175.24599194570328, -37.75763618309461], + [175.24045944039247, -37.752179033016525], + [175.24221897212558, -37.75121762884103], + [175.24651765736056, -37.75334401310657], + [175.24965763092044, -37.75157674260623], + [175.25269925594333, -37.75325494370085], + [175.25392055522389, -37.7527007316992], + [175.25439351818935, -37.75222710336338], + [175.25521874405968, -37.75217055034592], + [175.25552183433322, -37.75229638045315], + [175.25602877216446, -37.75233879501874], + [175.25794208104347, -37.751651676062906], + [175.25904893918778, -37.74810004599688], + [175.25944232940677, -37.74679076345102], + [175.25577664353477, -37.744658534837185], + [175.25492191402012, -37.74303387048904], + ], + ], + }, + properties: {}, + }, + name: 'Monday G Run', + frequencyInDays: 7, + frequencyOffset: 0, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.28937846422198, -37.802983511714494], + [175.28896808624268, -37.8035917412185], + [175.2875304222107, -37.8012859526985], + [175.286865234375, -37.79933191012392], + [175.28981029987338, -37.79666992079705], + [175.2913767099381, -37.79265556123337], + [175.29020726680758, -37.79117608543848], + [175.28332114176013, -37.784480627697356], + [175.27730226604038, -37.787637658503066], + [175.27386188507083, -37.78856184818681], + [175.2693808070035, -37.78937863086169], + [175.2661192414962, -37.7911478240849], + [175.26600122429954, -37.799880121239205], + 
[175.26612639427185, -37.80339818225478], + [175.2661871909004, -37.80648093990127], + [175.27189493201152, -37.80805476336766], + [175.2722346780502, -37.80759702991929], + [175.27504205703735, -37.808032158623526], + [175.27713060335375, -37.80711386375679], + [175.27784585996415, -37.80785697731293], + [175.2792584896088, -37.807136468264645], + [175.28000593141772, -37.80810279713254], + [175.280374288559, -37.80805476336766], + [175.2876555915282, -37.804392816336794], + [175.28814733028415, -37.80456941885746], + [175.28623938582317, -37.80734979625299], + [175.28641641096328, -37.809451962869815], + [175.28578698635104, -37.81007921100677], + [175.28372347376717, -37.81100877143358], + [175.28218925004697, -37.81158091110598], + [175.2829134466447, -37.813236560544716], + [175.28304576873782, -37.81445143336506], + [175.28245210734897, -37.81718623660967], + [175.284240244946, -37.81781341774705], + [175.28552055184264, -37.8171240817518], + [175.28646469203525, -37.81554762603039], + [175.28769493277653, -37.81605051272204], + [175.29379606246948, -37.812897522797606], + [175.29510498046875, -37.81354734343365], + [175.30178546730897, -37.81198211206907], + [175.2986240386963, -37.809281023634135], + [175.29760479927063, -37.8055202406721], + [175.29328107833865, -37.80502010718875], + [175.28937846422198, -37.802983511714494], + ], + ], + }, + properties: {}, + }, + name: 'Tuesday A RUN', + frequencyInDays: 7, + frequencyOffset: 1, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.2659368512832, -37.79052041490487], + [175.26485323884117, -37.790172794815795], + [175.26438832283023, -37.78995941695692], + [175.26169002056128, -37.788041109755845], + [175.25967299938205, -37.78906703935979], + [175.25599300861361, -37.79073732402739], + [175.2523881196976, -37.79237153809193], + [175.2482450012758, -37.79430456954241], + [175.24784445762637, -37.79477086341725], + [175.2430433034897, 
-37.79594930209237], + [175.23895561695102, -37.796994196920835], + [175.23734092712402, -37.7973799352928], + [175.23490816354754, -37.797309993869355], + [175.22979587316513, -37.79709169142483], + [175.2272799611092, -37.79627146191151], + [175.22356510162356, -37.7949446631086], + [175.222205221653, -37.794474131291956], + [175.21899193525314, -37.79329566908626], + [175.21431684494019, -37.79884021231207], + [175.2063345909119, -37.822497339164364], + [175.20541191101077, -37.833259627739935], + [175.2231001856126, -37.83286419119225], + [175.2349162101746, -37.81599400903759], + [175.2442574509769, -37.806811530450545], + [175.265429019928, -37.80960312427246], + [175.2654004088254, -37.806455509797715], + [175.26583313898303, -37.80349849407346], + [175.26585459665515, -37.79973600380628], + [175.2659368512832, -37.79052041490487], + ], + ], + }, + properties: {}, + }, + name: 'Tuesday B Run', + frequencyInDays: 7, + frequencyOffset: 1, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.26588812502496, -37.80712587377672], + [175.26588823671037, -37.807125697158796], + [175.26588825067105, -37.80712567508155], + [175.26588825765134, -37.80712566404293], + [175.26588826114155, -37.807125658523624], + [175.26588826288665, -37.80712565576397], + [175.2658882637592, -37.80712565438414], + [175.26588826419547, -37.80712565369423], + [175.26588826441358, -37.80712565334927], + [175.26588826452266, -37.8071256531768], + [175.2658882645772, -37.80712565309055], + [175.26588826460446, -37.80712565304745], + [175.26588826463168, -37.80712565300431], + [175.26588829255306, -37.80712560884983], + [175.26588834839575, -37.80712552054087], + [175.26588846008113, -37.807125343922934], + [175.26588851592382, -37.80712525561398], + [175.26588854384514, -37.80712521145949], + [175.2658885717665, -37.80712516730501], + [175.26550769456662, -37.815072985537384], + [175.27577161788943, -37.8444439648106], + 
[175.28521299362185, -37.844161552583444], + [175.2918934830814, -37.83198857304299], + [175.2959704401292, -37.83055084290494], + [175.2968215942383, -37.82719226278574], + [175.29675543286433, -37.826758658798965], + [175.29413580850817, -37.825116024735756], + [175.29211521192343, -37.82426008918849], + [175.28984427539402, -37.823243123158974], + [175.28691172643454, -37.82195494607129], + [175.28249859940846, -37.82005088858927], + [175.28199792082887, -37.819367224325475], + [175.28182625945195, -37.818745705205146], + [175.2819335478125, -37.81821458535876], + [175.28217673432667, -37.817338793373835], + [175.28285264968875, -37.81477068688855], + [175.2828329800468, -37.813209719423384], + [175.28260409842918, -37.81270398744872], + [175.28211236000064, -37.81171935426242], + [175.28204083453608, -37.81156395887545], + [175.28213381767273, -37.81147213439829], + [175.28593540235306, -37.809896970877], + [175.2863466748386, -37.809312102819106], + [175.2861785888672, -37.8073088259221], + [175.2870208024979, -37.805962446442166], + [175.28805971189286, -37.8046075655011], + [175.28763771057132, -37.80451997031842], + [175.2836608884536, -37.80653179956472], + [175.28034925449904, -37.808139528509685], + [175.27989506765158, -37.80830623266026], + [175.27918696447165, -37.8073427323331], + [175.27783513112814, -37.80802933380607], + [175.27704834938052, -37.80723818717096], + [175.2749812607363, -37.808294930327556], + [175.27229189763605, -37.80783154871797], + [175.27210593201744, -37.80828645487029], + [175.26977777426512, -37.80780612011425], + [175.2658295625952, -37.80682283404504], + [175.26588812502496, -37.80712587377672], + ], + ], + }, + properties: {}, + }, + name: 'Tuesday C Run', + frequencyInDays: 7, + frequencyOffset: 1, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.30198573978853, -37.812010365969314], + [175.2949619275751, -37.81371686100979], + [175.29377460217808, 
-37.81307269214979], + [175.28752326747056, -37.816270878023175], + [175.28650760476015, -37.815694536615254], + [175.28563499232408, -37.817208836482116], + [175.28417587061998, -37.81794337409171], + [175.28235912061064, -37.81737834565081], + [175.28210162854526, -37.8192768217473], + [175.28276681900027, -37.81996896303473], + [175.28605699561015, -37.82150012413529], + [175.2937602992461, -37.824697945419054], + [175.29695034027102, -37.82674877211764], + [175.29698610349448, -37.82720638682648], + [175.29610633893753, -37.830652530068015], + [175.29206156774308, -37.832095907569624], + [175.28537749894898, -37.84434794764985], + [175.2756786350801, -37.844652949596814], + [175.2834749221802, -37.86501758717914], + [175.31102657318118, -37.854988451032156], + [175.3136587212794, -37.8479627275549], + [175.32784938812256, -37.84062001680008], + [175.33563137229066, -37.84712682444365], + [175.34398555755615, -37.85089960549547], + [175.3459024464246, -37.83895933331503], + [175.33754825592044, -37.83606717010916], + [175.3304243035382, -37.83373979688079], + [175.322985642124, -37.8265538597356], + [175.3235292399768, -37.817785166067594], + [175.32247066323183, -37.812654545132695], + [175.32029628229796, -37.80851814564598], + [175.31548976374327, -37.807930440293504], + [175.31357288011353, -37.80542134379941], + [175.3037595731439, -37.8077722128813], + [175.29969691706359, -37.80768179558676], + [175.2984952874249, -37.80689064626299], + [175.29878139321227, -37.809083239829185], + [175.30198573978853, -37.812010365969314], + ], + ], + }, + properties: {}, + }, + name: 'Tuesday D RUN', + frequencyInDays: 7, + frequencyOffset: 1, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.3626036635251, -37.99625286472122], + [175.36138772964478, -38.015245681517264], + [175.38591384887695, -38.034842007676225], + [175.4230213304982, -38.07302794496402], + [175.34317016601565, -38.058612002480345], 
+ [175.3210902207502, -38.04288636557839], + [175.32769203098727, -38.02466136145733], + [175.32882213679844, -38.02209203173544], + [175.32832324461197, -38.02198074825163], + [175.32814800717463, -38.02241601999017], + [175.32614886760715, -38.021986382767786], + [175.32628119035508, -38.02142996363027], + [175.32595932461848, -38.021063710723574], + [175.3258198497497, -38.02066083059247], + [175.3257966043748, -38.02041290337608], + [175.32520115331863, -38.02047347653628], + [175.32519936561587, -38.0198550634275], + [175.32544612884524, -38.01846467203559], + [175.3254729509354, -38.017896956239944], + [175.32599151112663, -38.01732782765045], + [175.32565936446193, -38.016850615261895], + [175.32524406931776, -38.01696014532292], + [175.32427579153594, -38.015911325778035], + [175.32442241918034, -38.01558942166742], + [175.3243982792992, -38.015468971439155], + [175.3242686390877, -38.01524215823428], + [175.32440900780784, -38.014959697233614], + [175.3257662055694, -38.0128760737961], + [175.32845377965714, -38.011980055242475], + [175.32878637357499, -38.01174618679025], + [175.3285717968538, -38.01064446008748], + [175.3289151196077, -38.00973714282559], + [175.3300309185579, -38.0089002603281], + [175.33056020758525, -38.00832260840282], + [175.32994508721464, -38.00749698050246], + [175.33204436302188, -38.007163063575824], + [175.3336465359826, -38.00602040764172], + [175.33427417223723, -38.00648395374543], + [175.33461570696093, -38.00602604364288], + [175.3345781566896, -38.0052356143715], + [175.33495902971484, -38.004991861908316], + [175.33555626912857, -38.00506231055807], + [175.33663988091575, -38.00449730901623], + [175.337650179863, -38.00440290719623], + [175.33787190870498, -38.00360541532244], + [175.33889293648832, -38.00301222217073], + [175.3406363726754, -38.004169014571936], + [175.3425318002701, -38.002752963010394], + [175.34309506416324, -38.0024796127669], + [175.3437370059692, -38.00257260812544], + [175.3441518545151, 
-38.003024903306006], + [175.34574866207555, -38.002624741894095], + [175.3626036635251, -37.99625286472122], + ], + ], + }, + properties: {}, + }, + name: 'Te Awamutu A Run', + frequencyInDays: 14, + frequencyOffset: 1, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.3239816427231, -38.00982308524632], + [175.32417297406943, -38.00969487675175], + [175.32572686694044, -38.01017107874874], + [175.32571434974673, -38.012857759147096], + [175.32433837652212, -38.0149343390894], + [175.32421767711642, -38.01526540310252], + [175.32436251640323, -38.015466153930575], + [175.32437324523926, -38.01559928312385], + [175.32423734643095, -38.01591484761011], + [175.32523781072086, -38.017011564659846], + [175.3256508709092, -38.01688125548611], + [175.32569020986563, -38.016962258443534], + [175.3259038927354, -38.01732078386398], + [175.32541126012802, -38.017866668472294], + [175.3253665564262, -38.01852172484156], + [175.3251385689873, -38.019461333152776], + [175.32515645027163, -38.02051291946741], + [175.32570093870163, -38.02046643305216], + [175.32585471879432, -38.021048919834776], + [175.32618194829413, -38.0214088337096], + [175.3260308504105, -38.02202723370624], + [175.32817125320437, -38.02248222597262], + [175.3283697366715, -38.02207090200567], + [175.3287675977845, -38.02211104822908], + [175.3276857733727, -38.02455219451943], + [175.32735317945486, -38.02541847923635], + [175.32528162351815, -38.03080259559669], + [175.3214478510199, -38.04135980954493], + [175.3208041234757, -38.04294832883493], + [175.3134226842667, -38.04240755982844], + [175.27857542048878, -38.01579228373054], + [175.29607772652525, -38.010337325770806], + [175.2972972397402, -38.012757732350735], + [175.29982209227458, -38.01207022129911], + [175.3001886607308, -38.01056415453996], + [175.30089497555312, -38.01073885438705], + [175.30144214651958, -38.01146441695887], + [175.3020823002953, -38.011885662541154], 
+ [175.30294597192554, -38.01164756775624], + [175.3035235400603, -38.010107679230195], + [175.30324101491715, -38.00988366715885], + [175.30330181089087, -38.00960611709183], + [175.30396163463595, -38.00772381754396], + [175.30410289753493, -38.00763928176051], + [175.30449628742647, -38.00642055000633], + [175.30694246204808, -38.0069897648342], + [175.30709624246813, -38.004216919971704], + [175.31158626101387, -38.00600068240821], + [175.3116291760307, -38.00664739115221], + [175.3129595520295, -38.00655721868367], + [175.31367123094245, -38.00654031131735], + [175.31412899516005, -38.00667557013874], + [175.3140199184418, -38.00693340657584], + [175.3149425983429, -38.00745189441754], + [175.31581163406375, -38.00749979818836], + [175.31613528733945, -38.00718983325732], + [175.32108664501723, -38.00872132938329], + [175.3209489588335, -38.00897211403709], + [175.3239816427231, -38.00982308524632], + ], + ], + }, + properties: {}, + }, + name: 'Te Awamutu B Run', + frequencyInDays: 14, + frequencyOffset: 1, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.327949525672, -37.841105791341626], + [175.31433105992622, -37.848335495932716], + [175.31142711552096, -37.85538376793355], + [175.28572082563193, -37.864599732460185], + [175.2940750148264, -37.892647116781205], + [175.30486107047182, -37.92804111178416], + [175.30159950430973, -37.943520628248116], + [175.3066349012079, -37.991067033557634], + [175.30095934824206, -38.00059572128231], + [175.29694676421062, -38.005194754073706], + [175.29619932174685, -38.01026970028505], + [175.29742956117846, -38.01256613185426], + [175.2997219566896, -38.01195187855275], + [175.30014395670153, -38.01048384890447], + [175.30097722985374, -38.01067686388345], + [175.30150651920852, -38.011430604485696], + [175.30206978342903, -38.01185325927953], + [175.30288875135741, -38.01159262261667], + [175.30346274375918, -38.01012599482427], + 
[175.30319273515488, -38.00994143142936], + [175.30324459097758, -38.00960188991045], + [175.30436038992778, -38.00625147585251], + [175.30674219218784, -38.006786876346055], + [175.30697107315063, -38.0040957472473], + [175.31166851531452, -38.00598236604148], + [175.3116810321808, -38.006600895942626], + [175.3136712312699, -38.006483953487454], + [175.3141826393403, -38.00665020887076], + [175.31408071539775, -38.00693199772202], + [175.3149604792998, -38.00741526240459], + [175.31584024451152, -38.00740821766556], + [175.3161486982208, -38.00714404281662], + [175.31904637879055, -38.00801969163146], + [175.32117962880875, -38.00868328892648], + [175.3210437300004, -38.00894816289458], + [175.3239512452274, -38.009773774194336], + [175.32420873729282, -38.00962443255325], + [175.3257822994783, -38.01013163050983], + [175.3258001804352, -38.012807041653474], + [175.3286647796631, -38.011756048892735], + [175.32844305082114, -38.0105275240821], + [175.32886505148784, -38.00963711228697], + [175.33046364740588, -38.00834515066506], + [175.3298288582937, -38.007471619776595], + [175.3320103884835, -38.00711093277593], + [175.33368051052096, -38.00593305266417], + [175.33429205417636, -38.006390963089494], + [175.3345370293755, -38.00604999625602], + [175.33453524101793, -38.005214480040024], + [175.33493578433993, -38.004959455599575], + [175.33556699763722, -38.005012996303954], + [175.3366237876617, -38.00445363024616], + [175.33760368845833, -38.00437472759423], + [175.33784151088184, -38.00357441706132], + [175.33888578436748, -38.00295163458015], + [175.3406846514554, -38.00409856454792], + [175.34248709722306, -38.002703646945115], + [175.3430771832064, -38.0024387506753], + [175.34376740411975, -38.00252329219687], + [175.34419655756213, -38.00296008867756], + [175.34570574804093, -38.002540201005495], + [175.34730434461386, -38.001390427710184], + [175.34688592000754, -37.99885410074063], + [175.35355567932132, -37.99645578442058], + [175.42596817715096, 
-37.933141141286875], + [175.41715621948245, -37.90578640854562], + [175.3824806213379, -37.90280656678573], + [175.37462711334229, -37.88462014917964], + [175.37555694929324, -37.877077844052316], + [175.37268161599062, -37.87188359115548], + [175.3609371185303, -37.87396133709055], + [175.3467321395874, -37.8595853075725], + [175.343770980835, -37.851486968700705], + [175.33547401253603, -37.84762384726844], + [175.3280067373999, -37.84110577893042], + [175.327949525672, -37.841105791341626], + ], + ], + }, + properties: {}, + }, + name: 'Te Awamutu C', + frequencyInDays: 14, + frequencyOffset: 1, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.2069139480591, -37.70787923714222], + [175.1947259902954, -37.69305769827658], + [175.17481327056888, -37.67935971195851], + [175.16831874934726, -37.67624618110306], + [175.15319824393373, -37.66536478044187], + [175.14868497935825, -37.66719921815032], + [175.1538920402527, -37.67543664331602], + [175.15370428562167, -37.676134375720856], + [175.14314174652102, -37.68459015022161], + [175.12694835684673, -37.67980125878629], + [175.1426696777344, -37.647250104679706], + [175.17738819122317, -37.650444135090844], + [175.21504640579226, -37.68643543466596], + [175.21088361740115, -37.69789667078305], + [175.2069139480591, -37.70787923714222], + ], + ], + }, + properties: {}, + }, + name: 'Ngaruawahia A Run', + frequencyInDays: 14, + frequencyOffset: 8, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.206892490387, -37.708043338740474], + [175.1911282548099, -37.715976351746086], + [175.2090454136487, -37.73870691868538], + [175.2090454136487, -37.73872954256308], + [175.21224975760563, -37.738435412971036], + [175.2164268528577, -37.75191888974838], + [175.1429271697998, -37.76074064096951], + [175.12999534628764, -37.73214530053211], + [175.1203536987305, -37.70765854867752], + 
[175.12670516967776, -37.67997108299219], + [175.1435565983411, -37.68504298605354], + [175.15368461958133, -37.67655188007848], + [175.154221058765, -37.675447966990006], + [175.14895677697498, -37.667204880616055], + [175.15051603491884, -37.6666273773826], + [175.1531767862616, -37.66551765301932], + [175.16801119112645, -37.676274488939356], + [175.1748061184481, -37.6796257726741], + [175.1937317865668, -37.69258793394241], + [175.19665718078613, -37.69572341693572], + [175.206892490387, -37.708043338740474], + ], + ], + }, + properties: {}, + }, + name: 'Ngrauawahia B Run', + frequencyInDays: 14, + frequencyOffset: 8, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.30903458638932, -37.80611079009724], + [175.30349627137184, -37.80745928550214], + [175.30061155557635, -37.807563123625066], + [175.29911622405055, -37.80680870455165], + [175.2976101636887, -37.80548739302606], + [175.29501646757126, -37.80520235961306], + [175.2933038771153, -37.80499891501721], + [175.29216930270198, -37.80441824715063], + [175.2903065085411, -37.80344021412763], + [175.28935968875888, -37.802941126196515], + [175.28900161385536, -37.803369218810275], + [175.2884450554848, -37.80240812623179], + [175.2875626087189, -37.80089811538828], + [175.28714150190353, -37.79963921959426], + [175.2876417338848, -37.79878086925137], + [175.2881821990013, -37.798311421573025], + [175.28901368379596, -37.79759505774075], + [175.2895098924637, -37.797169051102756], + [175.28994977474215, -37.79679072971542], + [175.290462076664, -37.79583696971983], + [175.29099047183993, -37.79487259987157], + [175.29154032468796, -37.79387536462462], + [175.2915725111962, -37.79262164807985], + [175.29068872332576, -37.79155549537935], + [175.28980895876884, -37.79050840399602], + [175.28949916362765, -37.79011733063508], + [175.28835386037827, -37.78904796228638], + [175.2868424355984, -37.78762458675018], + [175.28567165136337, 
-37.78652550586924], + [175.2843037247658, -37.78523350850345], + [175.28329387307173, -37.78427747950558], + [175.28419509530067, -37.783713607380655], + [175.2855844795704, -37.78294940539801], + [175.2870167791844, -37.78218625544975], + [175.28815001249313, -37.781625547202424], + [175.288654267788, -37.78135420104129], + [175.2902139723301, -37.78070657004482], + [175.2917240560055, -37.780091792327], + [175.29382020235064, -37.77926183429722], + [175.2950593829155, -37.77880286244531], + [175.2964594960213, -37.77847108656074], + [175.29785022139552, -37.77825166846443], + [175.29813185334206, -37.77987025910305], + [175.29819354414943, -37.780203088672195], + [175.29845528315494, -37.78016051346656], + [175.29873847983256, -37.77820149530238], + [175.3031712774828, -37.77769835037304], + [175.3028351068497, -37.78098427830287], + [175.30275106451884, -37.78253462007369], + [175.3029710050032, -37.7836736838543], + [175.3030496835709, -37.78404535973451], + [175.30535101890564, -37.790063986058705], + [175.3004045783928, -37.789852374607314], + [175.29971882706377, -37.78997990687238], + [175.30124723911288, -37.79154207139724], + [175.30148506186381, -37.791861424481304], + [175.30125975597915, -37.792334798872844], + [175.3005301954545, -37.79249447361553], + [175.30034601672017, -37.792794039609525], + [175.30059009790423, -37.79357262369703], + [175.3016129133903, -37.79333170148264], + [175.30186951160434, -37.79420707136019], + [175.301098823602, -37.794513695816676], + [175.3012186289925, -37.794854230627415], + [175.3010004758835, -37.79504781249807], + [175.30115425597614, -37.79544345164977], + [175.30249178409582, -37.795662465805016], + [175.30274569977342, -37.796432541294244], + [175.30216455459595, -37.796597859242894], + [175.30224144447857, -37.79755584827722], + [175.30347168456504, -37.79764062546951], + [175.3033930063248, -37.79810972426119], + [175.30258163807733, -37.79879146622999], + [175.30303180195915, -37.799696442595824], + 
[175.30424058437353, -37.79975013335927], + [175.30436217763054, -37.80004260531471], + [175.30357003211978, -37.80053288257068], + [175.3034162516997, -37.80122237295114], + [175.30423521995547, -37.80181295570812], + [175.30482172987834, -37.801821432943136], + [175.30518114588637, -37.80216617369982], + [175.30903458638932, -37.80611079009724], + ], + ], + }, + properties: {}, + }, + name: 'Wednesday A Run', + frequencyInDays: 7, + frequencyOffset: 2, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.30304789455843, -37.78049811167907], + [175.3028440473281, -37.78264202650545], + [175.30314981981067, -37.784093408854005], + [175.30540823914635, -37.790120510101644], + [175.30545651890864, -37.79029290720818], + [175.30631840250862, -37.792613169789036], + [175.30941188335422, -37.80127535607774], + [175.31028896570209, -37.80154027114065], + [175.31218260526663, -37.80233077204055], + [175.31326621770864, -37.80275251034655], + [175.31354516744614, -37.80249607678917], + [175.3138080239296, -37.802799134534055], + [175.3147467970848, -37.80239011224929], + [175.31456708908084, -37.80218030201183], + [175.31480044126516, -37.80206374051125], + [175.31469851732257, -37.801822139724294], + [175.31636685132986, -37.80173948664206], + [175.31658679246905, -37.80197472978694], + [175.3170166164637, -37.801751407765394], + [175.3167369961739, -37.801472452975034], + [175.3171366453171, -37.80122025362528], + [175.31677454710007, -37.80083453530707], + [175.31704276800156, -37.8006480334198], + [175.31729221343997, -37.80084089331765], + [175.317654311657, -37.800690420253716], + [175.31849116086966, -37.80104011070537], + [175.31913042063027, -37.800617656087624], + [175.31951129431042, -37.80012031530105], + [175.32186895587077, -37.80032871844606], + [175.32249659278025, -37.80000375054959], + [175.32270938147124, -37.800144334767765], + [175.3260290620529, -37.79928740312912], + 
[175.32629013148838, -37.79965970650188], + [175.3266119952605, -37.79954949966271], + [175.3265011314943, -37.79934603897806], + [175.3291475772858, -37.79854632222953], + [175.32995939342075, -37.79830329734257], + [175.33106088681967, -37.797653342039965], + [175.3409528723569, -37.791913707446106], + [175.35501480102542, -37.78058008102369], + [175.33562958262337, -37.76656185492084], + [175.3292012214661, -37.7718511574352], + [175.32825171947482, -37.772455402930326], + [175.3269562125206, -37.77330769870751], + [175.32526910305026, -37.773869530103184], + [175.32317429780963, -37.77458400386213], + [175.31966328620913, -37.77578184496937], + [175.3162005543709, -37.776201614782764], + [175.3134968876839, -37.77650690042271], + [175.31000465154648, -37.776886385675965], + [175.3065499663353, -37.77728706910843], + [175.30334114984728, -37.77772520388191], + [175.30304789455843, -37.78049811167907], + ], + ], + }, + properties: {}, + }, + name: 'Wednesday B Run', + frequencyInDays: 7, + frequencyOffset: 2, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.33101260640254, -37.80635378903725], + [175.33146858128023, -37.80993935240282], + [175.32838225321032, -37.812838191031794], + [175.32284259970766, -37.81260933948846], + [175.3206539158419, -37.80833448822579], + [175.31567037083735, -37.80753345586353], + [175.31403779939868, -37.8051161786351], + [175.3092527385161, -37.806048626949945], + [175.3052186961577, -37.80214921879052], + [175.3049254421785, -37.80177198256423], + [175.30423521995547, -37.80174089968851], + [175.3034859897889, -37.80123932755578], + [175.3036308284209, -37.8005653790237], + [175.30442833900455, -37.80008216685719], + [175.3042817113601, -37.79972046220036], + [175.30306220076457, -37.79965970701931], + [175.30263125880086, -37.79880700841944], + [175.3034117817879, -37.79813657009508], + [175.30351281155166, -37.79762084404711], + [175.30225843191147, 
-37.79753253453227], + [175.3021904825073, -37.79661057599286], + [175.3027832506996, -37.796460094311385], + [175.30253827550044, -37.79565963947953], + [175.3011703492303, -37.79541943091323], + [175.30103713295833, -37.79506547484829], + [175.30128300168144, -37.794823144581976], + [175.30115246794597, -37.79454336880822], + [175.30194818951713, -37.79424946189001], + [175.30165135816787, -37.79329990819263], + [175.3006374838151, -37.79351045025583], + [175.30041754202108, -37.7928166485], + [175.3005748985015, -37.79252697360474], + [175.30131697654727, -37.792361646805325], + [175.30160129070285, -37.791844466689284], + [175.29978632905113, -37.790007461972536], + [175.30042648315433, -37.78990006663306], + [175.30527591705325, -37.79018127235165], + [175.30927419618823, -37.80130149475114], + [175.31326889983578, -37.80277794159585], + [175.3135389088493, -37.80253069179613], + [175.31378924846652, -37.80283657506193], + [175.31478524186247, -37.80239293792921], + [175.31461179255245, -37.802191604892315], + [175.31484156841546, -37.80206162120953], + [175.3147360682283, -37.80184262641451], + [175.31634896996366, -37.80176350545289], + [175.31658768648416, -37.802010051558895], + [175.31705260274066, -37.80175856043847], + [175.31676113605502, -37.801476691612216], + [175.31717956066134, -37.801230143725846], + [175.31682014473517, -37.80084230625193], + [175.31705439085277, -37.80068476863414], + [175.31731009488794, -37.80085714160755], + [175.3176426886421, -37.80072291663743], + [175.31849920733296, -37.80107048773444], + [175.31914293782395, -37.80065439131708], + [175.31952738772816, -37.80014009592477], + [175.32188326109465, -37.80035415078862], + [175.32248854615324, -37.80004331198353], + [175.3227210041587, -37.80018460253515], + [175.3260254866473, -37.79933191012392], + [175.32627940265232, -37.799716223462404], + [175.3267085560947, -37.7995834096361], + [175.32659053889805, -37.79937147204704], + [175.33000946088578, -37.79833720788804], + 
[175.33107161565567, -37.79769290486209], + [175.33292055151836, -37.79813091840947], + [175.3329241282336, -37.8011093423265], + [175.3329294919968, -37.80470222397696], + [175.33290803432467, -37.80567706174925], + [175.33101260640254, -37.80635378903725], + ], + ], + }, + properties: {}, + }, + name: 'Wednesday C Run', + frequencyInDays: 7, + frequencyOffset: 2, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.46297907829285, -37.87979903327807], + [175.48307418823245, -37.87871508585692], + [175.4834818840027, -37.88512256394925], + [175.48105716705325, -37.893499430817556], + [175.47907948515788, -37.892548332047525], + [175.47504007838143, -37.89296743799188], + [175.4747647047043, -37.891299466595434], + [175.4742515095859, -37.89124302029671], + [175.46742260477913, -37.89151678468696], + [175.46632021687404, -37.88936685886997], + [175.4644140601158, -37.889474109869454], + [175.46444982268443, -37.88967802695751], + [175.4633447528977, -37.88972741868263], + [175.46341985475013, -37.89009997169204], + [175.46216547511, -37.89018111470256], + [175.4620519278251, -37.8897217740117], + [175.45990347873163, -37.88986007039213], + [175.45696645936917, -37.890005422389486], + [175.4566812515259, -37.8900138893631], + [175.45104503555197, -37.890231212024496], + [175.45491099357608, -37.896225658209026], + [175.45141339302066, -37.895590676376536], + [175.44795870780948, -37.89531974912757], + [175.44457912445068, -37.89548907877503], + [175.43970823287964, -37.89673363973121], + [175.43690800666812, -37.89760566650316], + [175.4335713386536, -37.898181367836614], + [175.4304599761963, -37.897673396305514], + [175.4202997684479, -37.894540827732385], + [175.41731715202334, -37.89211934554206], + [175.4152357578278, -37.88896113895298], + [175.40898084640503, -37.88631931786157], + [175.39992570877075, -37.88416853454584], + [175.39653539657596, -37.88283061402031], + [175.39541959762576, 
-37.879155441420664], + [175.3941535949707, -37.876445519279336], + [175.39218664256626, -37.87382018713179], + [175.3987312325626, -37.86136411013617], + [175.40488958184142, -37.8667680246762], + [175.407807827869, -37.86802154277233], + [175.41077613743258, -37.86863135056867], + [175.411033629498, -37.86092929587994], + [175.4452657699585, -37.86136975775596], + [175.4550290107727, -37.86126811294129], + [175.45874118804934, -37.8678408557141], + [175.46297907829285, -37.87979903327807], + ], + ], + }, + properties: {}, + }, + name: 'Cambridge A Run', + frequencyInDays: 14, + frequencyOffset: 2, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.5298805271741, -37.91862995683153], + [175.5133438180201, -37.931945147693575], + [175.47221302942492, -37.927699779097544], + [175.47431588194743, -37.919797950807784], + [175.47054290771487, -37.91704720467685], + [175.47805309295654, -37.90069013171825], + [175.47527432441714, -37.89986893732022], + [175.47184824965373, -37.90055749991097], + [175.46577572778918, -37.89645425032782], + [175.4609441758294, -37.89533103812735], + [175.45511841784906, -37.89615510459808], + [175.4514312752872, -37.890445710832275], + [175.4620286822865, -37.889747880966425], + [175.46214669931945, -37.89021992198426], + [175.4634806513787, -37.890122550515585], + [175.46337604522705, -37.88975211440357], + [175.4644989966255, -37.88969284445268], + [175.46447843313223, -37.88949386641148], + [175.4663148521286, -37.8893936717637], + [175.46730279922488, -37.89161415431171], + [175.4746592040465, -37.89139119196466], + [175.4748952384398, -37.893073271932224], + [175.47905445142538, -37.892661222600104], + [175.4810857768462, -37.893815520154924], + [175.49137830734253, -37.8951983961462], + [175.48776626761537, -37.90565096587734], + [175.5298805271741, -37.91862995683153], + ], + ], + }, + properties: {}, + }, + name: 'Cambridge B Run', + frequencyInDays: 14, + 
frequencyOffset: 2, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.48356056388002, -37.87835376581581], + [175.48398256389194, -37.88513385451268], + [175.48224806654616, -37.890231211507704], + [175.48143625434022, -37.89374214218175], + [175.4913854581537, -37.895040354357704], + [175.4985952412244, -37.86533379618574], + [175.48800945281985, -37.86045494937737], + [175.46524286270144, -37.868594650852714], + [175.4614877700806, -37.86863699866542], + [175.4592025279999, -37.86864546822504], + [175.46085476875305, -37.87337133065719], + [175.46314001083377, -37.87962119924818], + [175.48356056388002, -37.87835376581581], + ], + ], + }, + properties: {}, + }, + name: 'Cambridge C Run', + frequencyInDays: 14, + frequencyOffset: 2, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.46563625423008, -37.89653609246703], + [175.46080112457278, -37.89546367935274], + [175.45487880706787, -37.896468361618176], + [175.45148849487305, -37.895760005400774], + [175.44809818267822, -37.89558785491184], + [175.44427871704104, -37.89583620333461], + [175.4371547698975, -37.89776652468199], + [175.43619632726404, -37.8980713076357], + [175.43354988196373, -37.89845510694147], + [175.43067812963278, -37.897989468236936], + [175.42026042949146, -37.894797647273734], + [175.41712760936213, -37.89225763780649], + [175.41501045325276, -37.889186930886076], + [175.40883064368245, -37.886578987893024], + [175.40566921245045, -37.88583384435777], + [175.39992213271037, -37.88436329310196], + [175.3962135324764, -37.88298868150193], + [175.39516568227555, -37.879152618809265], + [175.39371013826897, -37.876496330202855], + [175.3913748264313, -37.87477435058419], + [175.38706541105063, -37.87344755176075], + [175.38034200690166, -37.87030830658298], + [175.37660837195293, -37.8700034087043], + [175.3732001781464, -37.871485538812244], + 
[175.37617921810127, -37.87703832326692], + [175.37495613119975, -37.884747163875105], + [175.38061380345428, -37.89619743759956], + [175.38330316459, -37.90150285013976], + [175.41793584869993, -37.90439244800558], + [175.42794227250855, -37.93282521519654], + [175.47331810346805, -37.9230987088173], + [175.47418355919945, -37.919828983935425], + [175.47038555156178, -37.91709516694108], + [175.4746896028519, -37.90763323182203], + [175.47789216041568, -37.900740926430394], + [175.47522068023682, -37.899980405481315], + [175.47187507108902, -37.90066896677086], + [175.46564340397708, -37.89651351586884], + [175.46563625423008, -37.89653609246703], + ], + ], + }, + properties: {}, + }, + name: 'Cambridge D Run', + frequencyInDays: 14, + frequencyOffset: 2, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.72560311062264, -37.70505550417933], + [175.66580772399902, -37.745901402389386], + [175.53062438964847, -37.91797541787165], + [175.53869247436523, -37.92695787471252], + [175.57943344814706, -37.947942743746296], + [175.62526703579354, -37.954575426242045], + [175.66268921596932, -37.95051468782931], + [175.82476615905765, -37.84320699276033], + [175.80734252929688, -37.73773402326087], + [175.72560311062264, -37.70505550417933], + ], + ], + }, + properties: {}, + }, + name: 'Matamata', + frequencyInDays: 14, + frequencyOffset: 9, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.51854729696066, -37.64397381588046], + [175.51866173744205, -37.64557091870219], + [175.5209505549283, -37.64828931261939], + [175.51996350332053, -37.648841473189655], + [175.5208718776703, -37.64975890289717], + [175.5255246171146, -37.64559923564545], + [175.52603602453019, -37.64576630606498], + [175.52639543988337, -37.645985762941436], + [175.5270338056289, -37.645997089812326], + [175.53148269653323, -37.64553410608947], + 
[175.53444743167347, -37.6451419128135], + [175.53446710098802, -37.645379777367715], + [175.53566157817843, -37.64756583242964], + [175.5362910031181, -37.64895190579622], + [175.53608179081496, -37.64908640544672], + [175.53731560532472, -37.651030258215016], + [175.53329765818495, -37.65273338860505], + [175.53430795713217, -37.65479040103014], + [175.53200751564876, -37.656018493376855], + [175.53144514560705, -37.655404803573276], + [175.53006291411296, -37.65626552544112], + [175.53047955036166, -37.656827536889125], + [175.52924707531932, -37.65752190578695], + [175.52666544914248, -37.65856167736074], + [175.5171847334714, -37.661899607736075], + [175.50632000056797, -37.66562522771769], + [175.50503254024082, -37.665800745905855], + [175.50119876774264, -37.66536477940514], + [175.49809455871585, -37.66581773154838], + [175.48848152160645, -37.6676295124798], + [175.48298835754395, -37.66837685658593], + [175.47234535217285, -37.68162401898024], + [175.45652390224862, -37.70641362846161], + [175.43952942593026, -37.718567695557105], + [175.4184436798096, -37.73513203544036], + [175.40542602364442, -37.77693373010094], + [175.39501190185547, -37.78014479060752], + [175.36600112915042, -37.77327024036875], + [175.35547256295106, -37.77939857131877], + [175.3373336785444, -37.76691524504896], + [175.48204421997073, -37.63476148669231], + [175.51854729696066, -37.64397381588046], + ], + ], + }, + properties: {}, + }, + name: 'Morrinsville A', + frequencyInDays: 14, + frequencyOffset: 9, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.45383452670652, -37.7614644275084], + [175.40714263217527, -37.77444619788819], + [175.4192161560059, -37.73578820234094], + [175.45732498168948, -37.70716058807404], + [175.48305988399082, -37.668931701021755], + [175.50047278426067, -37.66555162287498], + [175.50573348999026, -37.6659663557547], + [175.51762461705948, -37.66187412740755], + 
[175.52469849608318, -37.65934733613206], + [175.52931010723117, -37.65755729665728], + [175.53060293197635, -37.656826121074836], + [175.5301380166202, -37.656262693791064], + [175.5314075946808, -37.65552088848084], + [175.53205847827488, -37.656155104487944], + [175.5344474320009, -37.65484702839407], + [175.5333656072617, -37.65277019764946], + [175.53737059241033, -37.65103521382213], + [175.53614258768906, -37.6491625044154], + [175.5363732580736, -37.64901703209599], + [175.53550064563754, -37.64719205584873], + [175.53455293200392, -37.6453981837146], + [175.53452610958632, -37.645090941942186], + [175.53171157815086, -37.64546472916145], + [175.52646160125735, -37.64591638643528], + [175.5262684825357, -37.645728078041685], + [175.52551746368408, -37.64548313522804], + [175.52087187701548, -37.649591841458495], + [175.52017092726604, -37.64887828416317], + [175.52112579367534, -37.64829214405501], + [175.51883697553427, -37.64557941383815], + [175.51875829696658, -37.64380674091069], + [175.51664829297806, -37.64323471944154], + [175.5279833076202, -37.638004198302895], + [175.52937984466556, -37.637069632229796], + [175.54295539899613, -37.634486770311], + [175.561230182866, -37.63437348501112], + [175.56135892889873, -37.641141989043014], + [175.5817794799805, -37.65508203043008], + [175.56919098249637, -37.67589519682823], + [175.54564476013184, -37.685597710014804], + [175.52206992928407, -37.691257863654194], + [175.49471855338197, -37.72229046853262], + [175.49095630994998, -37.75036932109197], + [175.45383452670652, -37.7614644275084], + ], + ], + }, + properties: {}, + }, + name: 'Morrinsville B', + frequencyInDays: 14, + frequencyOffset: 9, + }, + { + color: 'yellow', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.48785210354256, -37.86001449030634], + [175.46456337324344, -37.86814577081273], + [175.45941353193484, -37.86791991126758], + [175.45512199751101, -37.86024036994521], + 
[175.41076183319095, -37.860720357226384], + [175.41065454483035, -37.86835468014353], + [175.4071855527582, -37.86721974600005], + [175.40511131461244, -37.86624855227631], + [175.39835929870605, -37.86051142625249], + [175.39114952087405, -37.87388229835735], + [175.38039207633122, -37.86959123648448], + [175.37615776149326, -37.869139529698884], + [175.37255287257724, -37.870720489265345], + [175.3609371185303, -37.87293376905065], + [175.34726143057924, -37.85920130877539], + [175.34434318717106, -37.851249769585976], + [175.34680366865365, -37.83877858122043], + [175.33793449401858, -37.835886408858904], + [175.33078194363046, -37.83337825839438], + [175.32368659973145, -37.826508665706385], + [175.32414436689578, -37.81746875314057], + [175.32279968960214, -37.812722355099154], + [175.32846450980287, -37.81290882442582], + [175.33164024527653, -37.80995913078478], + [175.33146858389958, -37.80878374040932], + [175.3310823431821, -37.806421602979356], + [175.33320665272188, -37.80577171963037], + [175.33362150061294, -37.79663318423286], + [175.34139633004088, -37.7919758824897], + [175.3541708010016, -37.78206118290115], + [175.35525798884922, -37.78027481427491], + [175.36668777465823, -37.77397694964317], + [175.3775596636115, -37.77648711411882], + [175.38630008697513, -37.77906503216347], + [175.38928270339966, -37.779913011870505], + [175.3946900367737, -37.78096449319738], + [175.39855241775513, -37.77967557853309], + [175.41797876445347, -37.77353596496641], + [175.4316115396796, -37.76923906395871], + [175.44392109266488, -37.76562608130098], + [175.45399189169984, -37.76250486498014], + [175.4779100453016, -37.78322004184905], + [175.47633647918704, -37.790138883110174], + [175.48799516167492, -37.859946717594056], + [175.48785210354256, -37.86001449030634], + ], + ], + }, + properties: {}, + }, + name: 'Matangi', + frequencyInDays: 14, + frequencyOffset: 9, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + 
coordinates: [ + [ + [175.27884721777812, -37.75076661224596], + [175.27645826339725, -37.75124449144851], + [175.27557134606468, -37.751111590515926], + [175.27510464169612, -37.7525734884225], + [175.27502775148608, -37.75292835496253], + [175.2746504545212, -37.75322525387512], + [175.27470991004523, -37.75344050484178], + [175.27395486853496, -37.75344015121984], + [175.27440190293424, -37.755023587539704], + [175.27228116945483, -37.755572127331355], + [175.2735328678682, -37.75715834530335], + [175.2747452256881, -37.7609131068588], + [175.27008712313545, -37.762970652657756], + [175.26877105236056, -37.76531226937633], + [175.26950776566082, -37.76673218730985], + [175.26881933212283, -37.76699369232948], + [175.268238186618, -37.76701772256432], + [175.2677232027327, -37.76733011374197], + [175.26745140552524, -37.76775487883], + [175.26655554771426, -37.76580843089935], + [175.26649653911593, -37.76407821195334], + [175.2647906541825, -37.762186798856774], + [175.2632832527161, -37.76085090620296], + [175.26283800601962, -37.7600196719307], + [175.26289701461795, -37.75911633021707], + [175.26390552520755, -37.75803909140534], + [175.26504278182983, -37.75728416666912], + [175.26641070842746, -37.75616448882946], + [175.26673793792727, -37.75563433242583], + [175.2665287256241, -37.75427287337665], + [175.26617467403415, -37.75201220223661], + [175.265246629715, -37.75006958161552], + [175.26421129703525, -37.74903462753639], + [175.26280045509338, -37.748063284516114], + [175.26185631752017, -37.747690013938204], + [175.25961935520175, -37.74569637783472], + [175.25506317610058, -37.742895298901985], + [175.2570033073971, -37.7427312750238], + [175.25837302251603, -37.743622090926095], + [175.2619922156737, -37.74496253655533], + [175.26556134267594, -37.744744785800286], + [175.2666592593596, -37.746342548733296], + [175.26849746704104, -37.74593250616037], + [175.26851177259232, -37.74514069522262], + [175.2701497077942, -37.74425838143511], + 
[175.2728641033173, -37.74457793877768], + [175.27484089136126, -37.745053736879065], + [175.27510374784475, -37.74592331571689], + [175.27522176504135, -37.746264781686286], + [175.27606666088107, -37.74697315948038], + [175.2780783176422, -37.74867196602247], + [175.2796018123627, -37.74994021314722], + [175.27974128723145, -37.75010563508278], + [175.27996391057968, -37.750396182971876], + [175.27884721777812, -37.75076661224596], + ], + ], + }, + properties: {}, + }, + name: 'Thursday A Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'green', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.2927875518799, -37.74558467589448], + [175.29216885610367, -37.743373229322664], + [175.29099583625796, -37.74359239723541], + [175.28964400291446, -37.74086476884459], + [175.28596401214602, -37.741000516335546], + [175.28208017349246, -37.73974484254553], + [175.28013825416565, -37.738395817916114], + [175.2762544155121, -37.73870974408122], + [175.27492403984073, -37.741543503809936], + [175.27565360069278, -37.74357118744906], + [175.27463078455187, -37.74430080060788], + [175.2734738592699, -37.74437432703014], + [175.2723062038422, -37.74428524708663], + [175.2701264624193, -37.743923269115506], + [175.26787161848918, -37.74331525563427], + [175.26536464691165, -37.742141633839275], + [175.2623516323365, -37.73979857659746], + [175.2610820536211, -37.73965858472035], + [175.25884866687193, -37.740156332586565], + [175.254889726366, -37.74263370882117], + [175.2550506592343, -37.74281894318246], + [175.25701940048748, -37.742656333052224], + [175.2584427592956, -37.74357401550723], + [175.2620494365692, -37.74487345706087], + [175.26561498652887, -37.74468540008455], + [175.26668608210463, -37.74624357266574], + [175.2684187888008, -37.74585332560315], + [175.26843488205492, -37.74511100240008], + [175.27007281758418, -37.74418485489767], + [175.2728354928695, -37.744514309988745], + [175.27490973494426, 
-37.74503182070506], + [175.27525663375857, -37.74621812183308], + [175.27556240558627, -37.74651080588027], + [175.278350115259, -37.74877447321357], + [175.27945518537314, -37.747468029991275], + [175.2829957006179, -37.746738448314154], + [175.2860659360886, -37.74763345745318], + [175.28656303893513, -37.747048096951595], + [175.2889072896142, -37.74642738414771], + [175.2927875518799, -37.74558467589448], + ], + ], + }, + properties: {}, + }, + name: 'Thursday B Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.26779651663676, -37.76736474522351], + [175.26748895645144, -37.767788803394154], + [175.2680951356888, -37.76915849467351], + [175.26961863040927, -37.77099036409687], + [175.27127087116244, -37.77233031391736], + [175.2746289968491, -37.7747133292348], + [175.27971982955933, -37.77810538937172], + [175.2812594175339, -37.780246547199596], + [175.28330862522128, -37.784164069480205], + [175.28653264045715, -37.78240036220268], + [175.28872668743136, -37.78128530439649], + [175.2914035320282, -37.780091085337965], + [175.29197931376987, -37.77401652462451], + [175.2926123150974, -37.77363490400534], + [175.2926337727695, -37.773388970524884], + [175.2935296295982, -37.77268508555208], + [175.29250681443958, -37.772280843800374], + [175.285023451288, -37.773975536065734], + [175.28471231438743, -37.77472604975105], + [175.28299927733318, -37.771661755146496], + [175.2816724777222, -37.769220688342884], + [175.27924418454865, -37.764917882085385], + [175.27782261327957, -37.765238763694406], + [175.2761256694794, -37.762845547535555], + [175.2747738361359, -37.76097530785061], + [175.2701139445708, -37.76303355835969], + [175.26883363701927, -37.765334886350196], + [175.26959717273715, -37.76674702964325], + [175.26887476400594, -37.767053767513694], + [175.2682560682297, -37.767043872838684], + [175.26779651663676, -37.76736474522351], + 
], + ], + }, + properties: {}, + }, + name: 'Thursday C Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.3031444567023, -37.77167588929157], + [175.3031945228577, -37.777482109993414], + [175.30777573585513, -37.777100507780425], + [175.3151571750641, -37.776252495810304], + [175.31960427761084, -37.77572672350371], + [175.32690525054934, -37.77326741629248], + [175.33043503761292, -37.7709394794479], + [175.3289866447449, -37.7688150141044], + [175.32692134380343, -37.76700005325191], + [175.32443761825562, -37.765134159211804], + [175.32071471214294, -37.76259816411688], + [175.3171473741532, -37.76030806038947], + [175.31502842903137, -37.75871342777345], + [175.3149425983429, -37.758263870877705], + [175.31155228614807, -37.75422621857273], + [175.30735731190364, -37.74847755644232], + [175.3035593032837, -37.7421684997413], + [175.29442191036654, -37.74422161830498], + [175.29288768811972, -37.743311013402696], + [175.29231190637805, -37.74339302500378], + [175.29291272185222, -37.745632750266225], + [175.28934717156523, -37.74638355208894], + [175.2867347003121, -37.74705092500635], + [175.29353499368884, -37.75038628632744], + [175.29409646987918, -37.75077933649293], + [175.29476523377525, -37.75312204637926], + [175.2960634235933, -37.75582518942766], + [175.29519438787253, -37.756243658612675], + [175.29531061605667, -37.75656599150404], + [175.29603302478793, -37.75635817151553], + [175.3019785889774, -37.76848142668267], + [175.3031444567023, -37.77167588929157], + ], + ], + }, + properties: {}, + }, + name: 'Thursday D Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'orange', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.28532743459436, -37.75541378624299], + [175.28582274924705, -37.754663076039186], + [175.2878969909944, -37.752980665723484], + 
[175.28893679391334, -37.753190615773185], + [175.28953760873267, -37.754107460757304], + [175.2905921638012, -37.75379960859523], + [175.29207944869998, -37.75174357540349], + [175.2902197833464, -37.74963976620575], + [175.29052734418656, -37.749195809860964], + [175.28656482565572, -37.74721918183731], + [175.28602123173192, -37.747886546960366], + [175.28301715763521, -37.7468798411556], + [175.27952670920064, -37.74758114317091], + [175.2783644195006, -37.74884799570917], + [175.27970910006843, -37.749899918781004], + [175.28009533947625, -37.75043152900739], + [175.27839303060324, -37.75096879226034], + [175.27639746709616, -37.75133639077705], + [175.27558922800384, -37.751178040623735], + [175.2750813963212, -37.75300470051098], + [175.27467012416315, -37.753247874609514], + [175.27478456513566, -37.75350235862458], + [175.27399778377688, -37.75347691028836], + [175.2743768693654, -37.754805866906324], + [175.27445912382976, -37.75506882816084], + [175.27234554302368, -37.75560040228665], + [175.273600816945, -37.757144208124764], + [175.27395844475907, -37.75824973391038], + [175.27477920055392, -37.76085938813719], + [175.27543365955356, -37.76177543130884], + [175.2774238586426, -37.76455600556269], + [175.27786910533908, -37.76518646160101], + [175.27928888808677, -37.7648556847973], + [175.28100907824415, -37.76794287743312], + [175.28340876146106, -37.77230063174761], + [175.2846944334306, -37.774584710645044], + [175.28499841690066, -37.773894971423836], + [175.28577446981217, -37.77372818935976], + [175.2920705082215, -37.77231476615878], + [175.29255688190463, -37.772220065798265], + [175.29314339182753, -37.77242642761435], + [175.29259979724887, -37.771832782727294], + [175.29221713586597, -37.771451151483866], + [175.29094755715053, -37.76878533065294], + [175.29028236931484, -37.767431180991665], + [175.28978526614083, -37.76541263257779], + [175.2891826629639, -37.76308303518074], + [175.2881169321336, -37.76220376241788], + 
[175.28828859351054, -37.7621500446336], + [175.28809010938863, -37.761535112352384], + [175.28508424758914, -37.75927890392079], + [175.2842473983765, -37.758982029818924], + [175.2844512462616, -37.75693497611621], + [175.28607845262744, -37.75606128569251], + [175.28599977536942, -37.75568805545953], + [175.28532743459436, -37.75541378624299], + ], + ], + }, + properties: {}, + }, + name: 'Thursday E Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'black', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.30301213264468, -37.77166175592293], + [175.3017389778688, -37.76813370267783], + [175.29849886806917, -37.76154076735442], + [175.295945405087, -37.75642320429284], + [175.2952462430403, -37.756611231182], + [175.29508531049947, -37.756209728882375], + [175.29600083827975, -37.75579408683777], + [175.2947670221329, -37.75339632383255], + [175.29399454659148, -37.75078640633788], + [175.29068112395183, -37.74906714689188], + [175.29025197050944, -37.749629869330704], + [175.29213666959555, -37.75176478258887], + [175.29112100623027, -37.753198391986736], + [175.2906042337418, -37.75382753086671], + [175.28953492685108, -37.754164012294346], + [175.28889834880835, -37.75321394363476], + [175.28790056705478, -37.75300752820866], + [175.28585851214308, -37.75467297262936], + [175.28537571435666, -37.7553996489899], + [175.28603732596824, -37.75567674507793], + [175.2861195802689, -37.75609238778177], + [175.285958647728, -37.75617438521879], + [175.28448879686042, -37.75696325080951], + [175.28429567781134, -37.75894810121511], + [175.28512537490315, -37.75924214902925], + [175.28817951711974, -37.761563386063976], + [175.28841555151305, -37.7623748121319], + [175.2892595531739, -37.763105652901004], + [175.29007494471443, -37.76637385332457], + [175.2903360128403, -37.76745379807782], + [175.29230296655444, -37.77146245924322], + [175.29358863852397, -37.77269215239774], + [175.29269278071303, 
-37.773408758175556], + [175.2926784751617, -37.77367023945521], + [175.29202938058006, -37.77403348521339], + [175.29182553236757, -37.776651062389014], + [175.29150187925558, -37.780074126397565], + [175.29318451881412, -37.77941411771705], + [175.29593825362102, -37.77848839988867], + [175.29789090178386, -37.77814354916294], + [175.29822170734408, -37.78012500450098], + [175.29836297024306, -37.78011723150639], + [175.29871165741503, -37.77810185614299], + [175.30315384279677, -37.77755878357686], + [175.30301213264468, -37.77166175592293], + ], + ], + }, + properties: {}, + }, + name: 'Thursday F Run', + frequencyInDays: 7, + frequencyOffset: 3, + }, + { + color: 'cyan', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.274181961795, -37.726294505292174], + [175.27388513110054, -37.72606114253438], + [175.27352035067457, -37.72598618368775], + [175.27305185794833, -37.72713884684019], + [175.271567702348, -37.72693801633841], + [175.27157574881136, -37.72641967201817], + [175.27106076484418, -37.726218132191676], + [175.27043581030742, -37.72675839938344], + [175.264323949159, -37.72619974563152], + [175.26349425272204, -37.72302312052075], + [175.26286125139453, -37.7230188771264], + [175.2610677480698, -37.72297927460423], + [175.25744140148166, -37.72329468067879], + [175.25451421694018, -37.723451675682014], + [175.25621831417087, -37.725998912583165], + [175.25722682476047, -37.727458478082944], + [175.25832116603854, -37.72957988827563], + [175.2588057518551, -37.73053733133324], + [175.2595210079744, -37.73179174690927], + [175.2658277752198, -37.730626428079574], + [175.26643574232367, -37.73051328950833], + [175.26822388172153, -37.730247412822955], + [175.26947289727107, -37.73024882710656], + [175.27086138703459, -37.730378937198104], + [175.2733513715066, -37.73055571655755], + [175.2730911965773, -37.72917823944882], + [175.2733755107329, -37.72769325064072], + [175.2738757430416, -37.72754934635545], + 
[175.27422130107885, -37.726742842176485], + [175.27479708151077, -37.72673294185149], + [175.27491241715327, -37.726246418361725], + [175.274181961795, -37.726294505292174], + ], + ], + }, + properties: {}, + }, + name: 'Friday A Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'yellow', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.27459681034094, -37.744169301093144], + [175.27407646179202, -37.7425870466788], + [175.27424812316895, -37.741000516335546], + [175.27368485927585, -37.73841702918542], + [175.27305185794833, -37.73558739218899], + [175.27307331562045, -37.73532011953632], + [175.27390480041507, -37.73282977082498], + [175.27390480041507, -37.7326855231885], + [175.27341306198656, -37.73063208518487], + [175.2762186526161, -37.73072542451132], + [175.27622401703417, -37.730523189069004], + [175.27726292610168, -37.73060804313178], + [175.27890622637648, -37.72799166472279], + [175.28061926343074, -37.72475571347069], + [175.281997025013, -37.724904926709684], + [175.2824476361275, -37.72447496543349], + [175.28308331999145, -37.724433949295225], + [175.28304308669252, -37.724819358983254], + [175.2836340666909, -37.72499261584849], + [175.28479367483675, -37.725113542018306], + [175.28649866559138, -37.725146071753464], + [175.28748214244848, -37.72659292526933], + [175.28858184945423, -37.726459979814074], + [175.33540248870852, -37.713249121250946], + [175.35055160522464, -37.72840039170947], + [175.3077220916748, -37.74781302376846], + [175.3071588277817, -37.7473846093377], + [175.30711591243747, -37.747270082287585], + [175.30694961547852, -37.74704102765563], + [175.3068315982819, -37.74684590648405], + [175.30658483505252, -37.74645566259756], + [175.30633807182315, -37.74599754758339], + [175.3036263585091, -37.74206103503908], + [175.29677331447604, -37.74363057483553], + [175.29449075460434, -37.74412900280799], + [175.2928948402405, -37.7431766843151], + [175.29104411602023, 
-37.7434863482429], + [175.29049158096316, -37.74247675422536], + [175.2896869182587, -37.740822347702604], + [175.28597474098208, -37.740941126838884], + [175.2821230888367, -37.73971090512044], + [175.28019189834598, -37.73832794181349], + [175.2761793136597, -37.738658837225955], + [175.27482748031616, -37.741564714177336], + [175.27552485466006, -37.74356694549107], + [175.27459681034094, -37.744169301093144], + ], + ], + }, + properties: {}, + }, + name: 'Friday B Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'red', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.24485468842613, -37.73646698126301], + [175.2467572684691, -37.737346559886944], + [175.24959325790408, -37.74016623059902], + [175.2547931679874, -37.74257290697506], + [175.25568366138035, -37.7419054929015], + [175.25451779365542, -37.73964868664001], + [175.2537399530411, -37.738728127276296], + [175.25358974933627, -37.735156078355004], + [175.25043189481954, -37.735734462529784], + [175.24958074136524, -37.734412233537846], + [175.24845778898458, -37.73427506014751], + [175.24818062825943, -37.733302108812914], + [175.2480304245546, -37.73252147668793], + [175.24835854757842, -37.731937410926705], + [175.2489495277405, -37.73172103650004], + [175.24924814679255, -37.73059955772988], + [175.25007605552673, -37.73065188427313], + [175.2502870555327, -37.73030963933374], + [175.24943590207846, -37.72950069022644], + [175.24722218513492, -37.73070845373345], + [175.24611711502078, -37.73070845373345], + [175.24510860443118, -37.73018660041519], + [175.2440840005875, -37.7292065244883], + [175.24206161499023, -37.72817552152691], + [175.24125158786777, -37.72705540327631], + [175.24045944235698, -37.726063971779396], + [175.241217613002, -37.72520830268023], + [175.2416718005043, -37.724597306969486], + [175.24133384249586, -37.72424796167001], + [175.24136960506442, -37.72360018440756], + [175.24051308653728, -37.722918456098725], + 
[175.24065613746646, -37.72022259805848], + [175.23881614208224, -37.71968228319226], + [175.23586213610548, -37.71960873198833], + [175.23489654086012, -37.71936120377018], + [175.2339631314681, -37.719038709106286], + [175.2324950699404, -37.71750683879153], + [175.23024380207065, -37.718644075251106], + [175.22889554478755, -37.71953800968769], + [175.22804975509644, -37.720000532005194], + [175.22577524185184, -37.71051613121327], + [175.23112535564, -37.69953788728666], + [175.23058176040652, -37.69722885511175], + [175.21297931583834, -37.69545175572565], + [175.21148443222046, -37.69972464418216], + [175.22623300508715, -37.721195720720836], + [175.22762060165408, -37.72235835933699], + [175.2336287498474, -37.72923198116996], + [175.2354741096497, -37.730674512166026], + [175.23809194564822, -37.73554496802282], + [175.24485468842613, -37.73646698126301], + ], + ], + }, + properties: {}, + }, + name: 'Friday C run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'purple', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.26580139989167, -37.73069254362565], + [175.26646792899558, -37.73054581700122], + [175.26823997497561, -37.730281354715615], + [175.2694988255098, -37.73029125456624], + [175.27081668333273, -37.7304171213538], + [175.2720880510606, -37.73054157403787], + [175.27334868864273, -37.730588244031786], + [175.2736419439316, -37.73160789960122], + [175.27386188507083, -37.73274916188629], + [175.2738404273987, -37.732965533049665], + [175.2730089426041, -37.73532860439727], + [175.27301967144015, -37.73562133150441], + [175.27409791946414, -37.740478735556636], + [175.2741998434067, -37.74101748475441], + [175.2740389108658, -37.74232828335542], + [175.2740389108658, -37.74262946680943], + [175.27455389499667, -37.744241413759184], + [175.27319669723514, -37.74428383294169], + [175.27188777923587, -37.74416505916942], + [175.2700746059418, -37.74380873670902], + [175.2680951356888, 
-37.743308185593385], + [175.26659309864047, -37.74268461294293], + [175.2649945020676, -37.741772575456075], + [175.26368558406833, -37.74071417368053], + [175.26251614093783, -37.739783022130126], + [175.26191800832748, -37.739647272406486], + [175.26200652122498, -37.73846581508177], + [175.26083976030353, -37.73537314990128], + [175.2598071098328, -37.732677038024654], + [175.25951206684115, -37.73191761192322], + [175.26580139989167, -37.73069254362565], + ], + ], + }, + properties: {}, + }, + name: 'Friday D Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'orange', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.25382757143237, -37.73865600872107], + [175.25452852249148, -37.739462030367086], + [175.25582313493945, -37.74175843507893], + [175.25666892528537, -37.741055663682644], + [175.25785446166995, -37.7403769242441], + [175.25891125202182, -37.73998240408542], + [175.25988221168518, -37.739753326899354], + [175.261824131012, -37.73948182709422], + [175.2619314193726, -37.7384297559441], + [175.26118040084842, -37.73658435309564], + [175.26000022888186, -37.73359767263043], + [175.25938868522647, -37.7318751856543], + [175.25853037834167, -37.73018660041519], + [175.25730907873367, -37.72787852212611], + [175.25692820505358, -37.72722936214616], + [175.25399208068848, -37.723498349626006], + [175.25354504628922, -37.72405843828552], + [175.24884939215556, -37.72468358182366], + [175.24874567985535, -37.725257804534124], + [175.24971485116112, -37.725812221897826], + [175.2498829364777, -37.726615554402464], + [175.250909328679, -37.727121875758314], + [175.25167465209964, -37.72699458863574], + [175.25161385514366, -37.727662136098374], + [175.24971395730975, -37.72822997120153], + [175.24943590207846, -37.729438462647444], + [175.25038003965165, -37.73029549676707], + [175.25011003039253, -37.73072118181721], + [175.24933397748103, -37.7306462274283], + [175.24925351186545, 
-37.73093190234252], + [175.24904251054977, -37.73177477637578], + [175.24846047152099, -37.731986201092845], + [175.24813056210408, -37.7325921868507], + [175.24852752674636, -37.73423122089238], + [175.24959683396446, -37.734345768376684], + [175.250412225505, -37.73564819977285], + [175.2536684272491, -37.73505991674817], + [175.25382757143237, -37.73865600872107], + ], + ], + }, + properties: {}, + }, + name: 'Friday E Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'black', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.27341842738682, -37.73049349090093], + [175.27317523956302, -37.729164096666075], + [175.27341842738682, -37.72773850770977], + [175.27404069987827, -37.72756879291082], + [175.27396917343142, -37.72746979251815], + [175.27424097104813, -37.726773956846095], + [175.27485609076396, -37.726751328020306], + [175.27487754843608, -37.72656746768049], + [175.2749624846183, -37.7261976246728], + [175.2742016314369, -37.726244296886414], + [175.27389138936996, -37.7260208346509], + [175.27349263440559, -37.72595860440685], + [175.2730339763366, -37.72709217493135], + [175.27162671111003, -37.72685881494594], + [175.2716410160065, -37.7263850213711], + [175.2710419895448, -37.726170045470624], + [175.27043044556194, -37.72670324138648], + [175.26437401777008, -37.726158730318915], + [175.26360511714304, -37.722924113792814], + [175.26283442930435, -37.72296654493088], + [175.26106059562153, -37.722924113792814], + [175.25706052747412, -37.723229619308974], + [175.25443375132454, -37.72342055935771], + [175.2540457248688, -37.723079695290465], + [175.25141000660372, -37.717045715828974], + [175.2568244934082, -37.710465204976444], + [175.26100158866032, -37.70913545127014], + [175.27683735068422, -37.71126870339864], + [175.29115677403755, -37.71768506508274], + [175.28937935741854, -37.722463024032486], + [175.28840482234958, -37.726424622331265], + [175.28752505779272, 
-37.7265335242151], + [175.28653621717245, -37.72509939858881], + [175.28481245062724, -37.72509091255571], + [175.2836573123932, -37.724966450711584], + [175.2830958368577, -37.724812287176015], + [175.28310567151496, -37.72440424777878], + [175.2824288606644, -37.7244360708224], + [175.28197109677424, -37.72485471723241], + [175.2805960177284, -37.724707625800136], + [175.27889013312236, -37.72794923620455], + [175.2771931883399, -37.730558544854], + [175.27617394907796, -37.730479347851464], + [175.27614712682407, -37.73066461288453], + [175.2758592369355, -37.730654713083815], + [175.27342021476218, -37.730598143582455], + [175.27343094359824, -37.73048783326714], + [175.27341842738682, -37.73049349090093], + ], + ], + }, + properties: {}, + }, + name: 'Friday F Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + { + color: 'violet', + geometry: { + type: 'Feature', + geometry: { + type: 'Polygon', + coordinates: [ + [ + [175.25341987631694, -37.72393963206394], + [175.24874925591575, -37.72463549436526], + [175.24865984916687, -37.72527477604315], + [175.24953246127552, -37.725812221897826], + [175.24978458859553, -37.726608482766736], + [175.25092542160564, -37.72723360504032], + [175.25161921988914, -37.72706247486937], + [175.25145292260277, -37.72759707908294], + [175.24960398674014, -37.72815996513556], + [175.24923205419333, -37.72942714877197], + [175.24715065999777, -37.730649055980244], + [175.24610638618472, -37.73060380120781], + [175.245147943715, -37.73009467534351], + [175.24415373802188, -37.729087726524824], + [175.24212956439447, -37.728120364585386], + [175.24052739143374, -37.72604982814294], + [175.24173796187827, -37.72459447792742], + [175.2413982151848, -37.72424937619766], + [175.2414214612145, -37.7237642509302], + [175.24142861366275, -37.72349835014397], + [175.24058461200184, -37.7228590520975], + [175.2407687902451, -37.720190066031535], + [175.2389037609646, -37.71960024532618], + [175.23590683980734, -37.719538009946675], + 
[175.23401141254, -37.718977887624376], + [175.23240923925192, -37.71733710106594], + [175.22810697468236, -37.719812411588336], + [175.22623300508715, -37.710629299812396], + [175.2315974231169, -37.69957750311585], + [175.23004531860354, -37.69366895226277], + [175.2422046652646, -37.69763067560704], + [175.2565383902402, -37.71021623037784], + [175.2510452261777, -37.717119270095566], + [175.25379180820894, -37.72325225050565], + [175.25341987631694, -37.72393963206394], + ], + ], + }, + properties: {}, + }, + name: 'Friday G Run', + frequencyInDays: 7, + frequencyOffset: 4, + }, + ], + commonDate: '2017-01-30', + commonStartDate: '2016-05-02', +} diff --git a/_import-artifact/tests/email.test.ts b/_import-artifact/tests/email.test.ts new file mode 100644 index 000000000000..40be31e40769 --- /dev/null +++ b/_import-artifact/tests/email.test.ts @@ -0,0 +1,6 @@ +// tricky because needs authentication as well as content + +// goal.todo('send an email') +// goal.todo('add an api key for the email service') +// // this should know that the customer address is implied by the prior lookup +// goal.todo('look up a customer and then send and email') diff --git a/_import-artifact/tests/fetch.test.ts b/_import-artifact/tests/fetch.test.ts new file mode 100644 index 000000000000..ab51bf10238a --- /dev/null +++ b/_import-artifact/tests/fetch.test.ts @@ -0,0 +1,10 @@ +// import { debug, expect, test } from '../tst-helpers.js' + +// help( +// 'login', +// async ({ help }) => { +// const result = await help('login-github') +// debug(result) +// }, +// 1200000, +// ) diff --git a/_import-artifact/tests/fixtures/fixture.ts b/_import-artifact/tests/fixtures/fixture.ts new file mode 100644 index 000000000000..4fafc3e5ca3a --- /dev/null +++ b/_import-artifact/tests/fixtures/fixture.ts @@ -0,0 +1,79 @@ +import { cradleMaker } from '@/cradle-maker.ts' +import { type CradleMaker, getRoot, Provisioner } from '@/constants.ts' + +export const actorId = 'testerActorId' + +export const 
infoPath = 'info/test-format.md' +export const requesterPath = 'agents/test-requester.md' +export const assessorPath = 'agents/test-assessor.md' +export const fileRunnerPath = 'agents/test-file-runner.md' +export const meetingsPath = 'agents/meetings.md' +export const routerPath = 'agents/router.md' +export const reasonerPath = 'agents/reasoner.md' + +export const firstTestPath = 'tests/fixtures/test-file-runner.test.md' +export const secondTestPath = 'tests/fixtures/second.test.md' +export const meetingTestPath = 'tests/meetings.test.md' +export const routerTestPath = 'tests/router.test.md' +export const testFixturePath = 'tests/test-fixture.test.md' +export const testRunnterTestPath = 'tests/test-file-runner.test.md' +export const reasonerTestPath = 'tests/reasoner.test.md' + +export const info = await Deno.readTextFile('./HAL/' + infoPath) +export const requester = await Deno.readTextFile('./HAL/' + requesterPath) +export const assessor = await Deno.readTextFile('./HAL/' + assessorPath) +export const fileRunner = await Deno.readTextFile('./HAL/' + fileRunnerPath) +export const meetings = await Deno.readTextFile('./HAL/' + meetingsPath) +export const router = await Deno.readTextFile('./HAL/' + routerPath) +export const testFixture = await Deno.readTextFile('./HAL/' + testFixturePath) +export const reasoner = await Deno.readTextFile('./HAL/' + reasonerPath) + +export const firstTest = await Deno.readTextFile(firstTestPath) +export const secondTest = await Deno.readTextFile(secondTestPath) +export const meetingTest = await Deno.readTextFile('./HAL/' + meetingTestPath) +export const routerTest = await Deno.readTextFile('./HAL/' + routerTestPath) +export const testRunnerTest = await Deno.readTextFile( + './HAL/' + testRunnterTestPath, +) +export const reasonerTest = await Deno.readTextFile('./HAL/' + reasonerTestPath) + +const init: Provisioner = async (backchat) => { + // TODO be able to write a large number of files and do a single commit + // possibly by calling 
commit directly + // or by allowing an array of writes as in writeMany + + const promises = [] + const target = getRoot(backchat.pid) + + promises.push(backchat.write(infoPath, info, target)) + promises.push(backchat.write(requesterPath, requester, target)) + promises.push(backchat.write(assessorPath, assessor, target)) + promises.push(backchat.write(fileRunnerPath, fileRunner, target)) + promises.push(backchat.write(meetingsPath, meetings, target)) + promises.push(backchat.write(routerPath, router, target)) + promises.push(backchat.write(reasonerPath, reasoner, target), target) + + // seems to overload the atomic mutations limit if all at once + await Promise.all(promises) + promises.length = 0 + + promises.push(backchat.write(firstTestPath, firstTest, target)) + promises.push(backchat.write(secondTestPath, secondTest, target)) + promises.push(backchat.write(meetingTestPath, meetingTest, target)) + promises.push(backchat.write(routerTestPath, routerTest, target)) + promises.push(backchat.write(testFixturePath, testFixture, target)) + promises.push(backchat.write(testRunnterTestPath, testRunnerTest, target)) + promises.push(backchat.write(reasonerTestPath, reasonerTest, target)) + + await Promise.all(promises) +} + +export const fixture: CradleMaker = async ( + t: Deno.TestContext, + url: string, + update?: 'updateSnapshots', +) => { + return await cradleMaker(t, url, update, init) +} + +// Ideally we would get rid of HAL and just have napps. 
diff --git a/_import-artifact/tests/fixtures/mock-api.ts b/_import-artifact/tests/fixtures/mock-api.ts new file mode 100644 index 000000000000..54a6c2058dbd --- /dev/null +++ b/_import-artifact/tests/fixtures/mock-api.ts @@ -0,0 +1,16 @@ +import DB from '@/db.ts' +import { partialFromRepo, SolidRequest } from '@/constants.ts' +import FS from '@/git/fs.ts' +import Accumulator from '@/exe/accumulator.ts' +import IA from '@/isolate-api.ts' + +export const createMockApi = async (repo: string) => { + const db = await DB.create(DB.generateAesKey()) + const partial = partialFromRepo(repo) + const fs = await FS.init(partial, db) + const accumulator = Accumulator.create(fs) + accumulator.activate(Symbol()) + const api = IA.create(accumulator, null as unknown as SolidRequest) + + return { api, stop: () => db.stop() } +} diff --git a/_import-artifact/tests/fixtures/second.test.md b/_import-artifact/tests/fixtures/second.test.md new file mode 100644 index 000000000000..39f51a79ad9a --- /dev/null +++ b/_import-artifact/tests/fixtures/second.test.md @@ -0,0 +1,17 @@ +--- +target: agents/hamr.md +assessor: agents/test-assessor.md +--- + +# Second starter for 10 + +**Prompts:** + +- list all customers + +**Expectations:** + +- 10 customers listed +- it is short +- purely informational, with no instructions, prompts, or questions at the end +- No suggesting or asking for further actions or clarifications diff --git a/_import-artifact/tests/fixtures/test-file-runner.test.md b/_import-artifact/tests/fixtures/test-file-runner.test.md new file mode 100644 index 000000000000..afb15caa0bca --- /dev/null +++ b/_import-artifact/tests/fixtures/test-file-runner.test.md @@ -0,0 +1,19 @@ +--- +target: agents/hamr.md +assessor: agents/test-assessor.md +iterations: 2 +--- + +# Starter for 10 + +**Prompts:** + +- list all customers +- list customers + +**Expectations:** + +- 10 customers listed +- it is short +- purely informational, with no instructions, prompts, or questions at the end +- No 
suggesting or asking for further actions or clarifications diff --git a/_import-artifact/tests/git.test.ts b/_import-artifact/tests/git.test.ts new file mode 100644 index 000000000000..61949cfbb339 --- /dev/null +++ b/_import-artifact/tests/git.test.ts @@ -0,0 +1,14 @@ +// import { debug, expect, goal, test } from '../src/test-context.js' + +// // if we could make a help that was aware of the git repo, and the io rules +// // within it, then we could get it to answer questions about how the flow of +// // control worked in the system. The user could then make modifications to the +// // pseudocode and see what changes happened to their now forked version of the +// // underlying code that would be running in a sandbox. + +// test.skip('what was the last action to this process ?') +// test.skip('what parameters were used to call the runner at this point ?') +// goal.todo('please commit everything', async ({ result }) => { +// // this should push whatever the user has done to their branch on github +// // then we can do whatever we wish from this point +// }) diff --git a/_import-artifact/tests/helps.test.ts b/_import-artifact/tests/helps.test.ts new file mode 100644 index 000000000000..a83a0ed7dfa8 --- /dev/null +++ b/_import-artifact/tests/helps.test.ts @@ -0,0 +1,25 @@ +// const isolate = 'engage-help' +// const help = 'help-fixture' + +Deno.test.ignore('chat', async () => { + // const { engageInBand } = await artifact.actions(isolate) + // const text = 'say a single "x" character and do not call any functions' + // const result = await engageInBand({ help, text }) + // expect(result).toEqual('x') +}) +// test('calling another help file', async ({ artifact }) => { +// // this is where one executing help file needs to call another on, in another +// // branch. 
+// const text = 'ask the calculator tool what five time five is' + +// // give it the command to find suitable helps +// // give it the command to execute the helps it finds in a new branch only +// // this would be 'spawn-help' vs 'self-help' +// }) +// test('multi step', async ({ artifact }) => { +// // when the help hasn't finished, but it sends back a message to the parent +// // that it can't continue on without the response. +// // like if adding a customer, and the request is missing name info. +// }) +// test.skip('add a file to the database') +// test.skip('editing the help and rerunning it') diff --git a/_import-artifact/tests/logins.test.ts b/_import-artifact/tests/logins.test.ts new file mode 100644 index 000000000000..90ed7ef65b16 --- /dev/null +++ b/_import-artifact/tests/logins.test.ts @@ -0,0 +1,88 @@ +import * as github from '@/isolates/github.ts' +import * as actors from '../isolates/actors.ts' +import { expect } from '@utils' +import { Tokens } from '@deno/kv-oauth' +import { getActorId } from '@/constants.ts' +import { Crypto } from '../api/crypto.ts' +import { Backchat } from '../api/client-backchat.ts' +import { cradleMaker } from '@/cradle-maker.ts' + +Deno.test('login with github', async (t) => { + await using cradle = await cradleMaker( + t, + import.meta.url, + undefined, + github.init, + ) + const { engine, backchat } = cradle + const home = backchat.homeAddress + + const state = await backchat.state(home, actors.stateSchema) + const authProvider = state.authProviders.github + const opts = { target: authProvider } + const actions = await backchat.actions('github', opts) + + const githubUserId = 'github-user-id' + + await t.step('login with github', async () => { + const { pid } = backchat + const actorId = getActorId(pid) + const authSessionId = 'mock-session-id' + + await actions.registerAttempt({ actorId, authSessionId }) + const tokens: Tokens = { + accessToken: 'mock-token-1', + tokenType: 'bearer', + } + await actions.authorize({ 
authSessionId, tokens, githubUserId }) + + const opts = { target: engine.homeAddress } + const admin = await backchat.actions('actors', opts) + const newActorId = await admin.surrender({ authProvider }) + expect(newActorId).toEqual(actorId) + }) + await t.step('second machine login', async () => { + const second = await Backchat.upsert(engine, Crypto.generatePrivateKey()) + + expect(second.pid).not.toEqual(backchat.pid) + const actorId = getActorId(second.pid) + const authSessionId = 'mock-session-id-2' + await actions.registerAttempt({ actorId, authSessionId }) + const tokens: Tokens = { accessToken: 'mock-token-2', tokenType: 'bearer' } + await actions.authorize({ authSessionId, tokens, githubUserId }) + + const opts = { target: engine.homeAddress } + const admin = await second.actions('actors', opts) + await admin.surrender({ authProvider }) + + // TODO assert that the PID for the session has changed + }) + // await t.step('repeat login still succeeds', async () => { + // const { pid } = session + // const actorId = pid.branches.slice(-3)[0] + // const authSessionId = 'mock-session-id' + // await github.registerAttempt({ actorId, authSessionId }) + // const tokens: Tokens = { accessToken: 'mock-token-3', tokenType: 'bearer' } + // await github.authorize({ authSessionId, tokens, githubUserId }) + + // const actor = await session.actions('home', pid) + // await actor.surrender({ authProvider }) + // // verify that the PID for the session has not changed + // // verify that the latest PAT is the one that is stored + // }) + // // test trying to login with github when already authed + // await t.step('second machine revoked', async () => { + // // using the first session, deauth the second, then attempt to make another + // // request with the second session, watch it get denied. 
+ // // it should then become an unauthorized machine and reauthenticate as its + // // original pure machine name + // }) +}) +// do a push to a github repo using the authed token + +// shared sessions, where a multi app allows other to connect to a session +// possibly with read only mode + +// test login with github and continue existing sessions + +// test login to github where already have existing account diff --git a/_import-artifact/tests/reasoner.test.ts b/_import-artifact/tests/reasoner.test.ts new file mode 100644 index 000000000000..8ffae877e25b --- /dev/null +++ b/_import-artifact/tests/reasoner.test.ts @@ -0,0 +1,37 @@ +import { expect, log } from '@utils' +import { type Api } from '@/isolates/longthread.ts' +import { + actorId, + fileRunnerPath, + fixture, + reasonerTestPath, +} from './fixtures/fixture.ts' +import { TestFile } from '@/api/tps-report.ts' +import { addBranches } from '@/constants.ts' + +Deno.test.ignore('reasoner', async (t) => { + log('reasoner') + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner AI:completions AI:napps', + // ) + + const opts = { branchName: 'runner', noClose: true } + const { start } = await backchat.actions('longthread', opts) + await start({}) + const target = addBranches(backchat.pid, opts.branchName) + const { run } = await backchat.actions('longthread', { target }) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = 'run ' + reasonerTestPath + await run({ path, content, actorId }) + + const tpsPath = reasonerTestPath.replace('.test.md', '.tps.json') + const tps = await backchat.readJSON(tpsPath, target) + + expect(tps).toBeTruthy() + expect(tps.summary.completed).toBe(1) + }) +}) diff --git a/_import-artifact/tests/ssl/cert.pem b/_import-artifact/tests/ssl/cert.pem new file mode 100644 index 
000000000000..e82d9e3cad3c --- /dev/null +++ b/_import-artifact/tests/ssl/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIUS3bBjpDCYwPcD+BTMauYEBqzMdYwDQYJKoZIhvcNAQEL +BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE +CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAgFw0yNDA1MTkyMzI0MTJaGA8yMTI0MDQy +NTIzMjQxMlowQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEc +MBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAK3nvdOr/3im90jHAqNn7zYMCsJi4WhuUkqretcsqch0C9lL +K23fXcXvuPAE9LNZuOlLXSSS6efTdYZuuOt1C2eZSSAmm/eMRpVI+rPSFKm2QCEM +EZfpK6pdpNr1OpSE6RP4ltk/GnaCTdek2iMrcTa72Da/L+Fn5gDsc+6elq2Jm/x7 +XKcD7PZWaRGAp2o92NxmyHgyOXqktTPK6qYAtPutd3ldBl48dEBWLBxb1IRJ7LKo +kKmDIJyNcKFHa1YR3885RXMyKsjSxdVNeOLJ5zCi4KFs2ZhQ+UKbtJeKtG9CWCYF +TbitOko7hggypnXeje2IDjK7KCvt+9eiJFJxygkCAwEAAaNTMFEwHQYDVR0OBBYE +FBPyrfpc+ii2y7BChcxuY1NlipvjMB8GA1UdIwQYMBaAFBPyrfpc+ii2y7BChcxu +Y1NlipvjMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBABRBymvo +cIzRN4OI0t8Jzy34MVxOMJzjsQo1MH/Anzpjexl93pMGxKFzSfzGxIuMnzCrkiN4 +AqrKAB/c9xhePMPgGlOl7IWkiJoK++Mb2KI7vFRDjS9iPDBk6Xp1yMW7coQSzaDK +g6Qx15ppwjK7bIjuG7Hkkot7td+KbMie7Hot0jt2R8WBqBYAt4zkhnsg7iDjubBb +c6TLK23y4HnBQlMXVXP/7EeR6BE/56zZZ9rOftVrIr9//1Ratm4lFt4PvPiAxKJQ +S8pf9ck8TOLkewtsya7LqHfw4171MI/229SH9abiqa4gjcbF8ZX+wcf62bSEuQ+p +es7vZ4J3c+81/NQ= +-----END CERTIFICATE----- diff --git a/_import-artifact/tests/ssl/key.pem b/_import-artifact/tests/ssl/key.pem new file mode 100644 index 000000000000..a0d916c89b76 --- /dev/null +++ b/_import-artifact/tests/ssl/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCt573Tq/94pvdI +xwKjZ+82DArCYuFoblJKq3rXLKnIdAvZSytt313F77jwBPSzWbjpS10kkunn03WG +brjrdQtnmUkgJpv3jEaVSPqz0hSptkAhDBGX6SuqXaTa9TqUhOkT+JbZPxp2gk3X +pNojK3E2u9g2vy/hZ+YA7HPunpatiZv8e1ynA+z2VmkRgKdqPdjcZsh4Mjl6pLUz +yuqmALT7rXd5XQZePHRAViwcW9SESeyyqJCpgyCcjXChR2tWEd/POUVzMirI0sXV +TXjiyecwouChbNmYUPlCm7SXirRvQlgmBU24rTpKO4YIMqZ13o3tiA4yuygr7fvX 
+oiRSccoJAgMBAAECggEADHII3p7A5Q9s48VTQeK4prForTXinATGKKAaRc8lVOpC +JVI4+M9bEITqAypVoUa24iJBt5H4gZ3bW6wtmXMqRmccybtdntMIA5vCDk+m6SjY +8MyCb1DRbksulrllUHbtD7/y+rfmgsvrOncqocj0fJZnoC5hRLQ47IC/gLiSf7d6 +iF4DfpC5ZGOF4/shWiE7EJfCa5dLXKobaan9ytGujnquH6vRtFMhdINzkfOErdiU +A00HYR9/iVGs3eY6aNdXLDVJ7TcRzq0aLfMHcVYHv/8qW2kPOF5vRgHY2Hv4L9OC +EopiFkJmEPPyoNM0rFAhcVc0eIrHnVY+3nBqQDqW8QKBgQDyPDM3qhbrxmwiOjKZ +fbPF5mFvDdrAPUG3ml8GpKu9zhNSRTLNlr7uINH9I5dPXuZIRQhuRu2KBLF4q944 +1IYZiqo2N0x5OVz4QyCZrezZmva88kByRd5TfBo6/qtD7LMf4cw07I3N3VtagKIT +tMuNLqO7EzceiwrpR/LPGXr3EQKBgQC3yYtyHa8acG4GNv6CVbdAS3kVobtCoclb +PCGO4UzP7KD/JbChv2oMxVuHRYGWcwol3Tz5kcHhWrHM0g57/bqGHCRztrxoRsSG +rAeAkvxydX5hNWPkvJiWpUBfYtwPRm7Gn11y591/QDSqo08E+zOymu1sSvFycQTL +HbGZ8rDTeQKBgQCzRHQPDPSblqPuDK3nxAxoOCABZPpv7KWW8ErFCtcCx+CqsQVw ++s/gQS/GrDFNjwjTkSs1GY6v5ttEV8VEv5tiiamAKBqTWC2CmYMl2H6BaFqT6KBj +MWvg0uZCpLgws5q4zI0o2av0H459AtdvyAwgNtT7HLRdCT+Q9ysts0i6UQKBgQCd +DboANqWf7lvXK7+qGR2T7+r1NVQUyLSNJoF228kMFaYu40JhtnUlnLFuk1L+Aii1 +2q10h74qmZo+8XI3OsfeOsZbq9itM2xVuURf9zuLhCQPZdSLby7l7SlpWc27r/CS +AzWUL9f0GiXne87W7uNqyUICvLg/l50FBcbzS5UX4QKBgCSRbW9yl3Q/x7OQGLxV +rqhWc6+Y+fIW/nccqPccJ/7OVNziX2INePt5HkzDnBjO5dXmPr8HJHxQRF60hEwi +tsPnkmEgI2xxOmhUKJrwiIvxS7SxfCxUw62w0LvXiiX8N8LUcjB9aLdnr4HSOb+e +1Ws/F4Hx7Lv/S1qO+SHOCyIB +-----END PRIVATE KEY----- diff --git a/_import-artifact/tests/test-controller.test.ts b/_import-artifact/tests/test-controller.test.ts new file mode 100644 index 000000000000..742ea2fd29a4 --- /dev/null +++ b/_import-artifact/tests/test-controller.test.ts @@ -0,0 +1,54 @@ +// import { Engine } from '@/engine.ts' +// import { Crypto } from '../api/crypto.ts' +// import DB from '@/db.ts' +// import { Backchat } from '../api/client-backchat.ts' +// import { log } from '@utils' +// import { type Api } from '@/isolates/longthread.ts' +// import { fixture } from '@/tests/fixtures/fixture.ts' + +// const actorId = 'test-controller-id' +// const assessorPath = 'agents/test-assessor.md' +// const controllerPath = 
'agents/test-controller.md' + +// Deno.test.ignore('test file runner', async (t) => { +// await using cradle = await fixture() + +// const actions = await backchat.actions('longthread') +// await t.step('test count', async () => { +// const content = `how many tests in ./${simplePath} ?` +// await actions.run({ content, path: synthPath, actorId }) +// }) +// log.enable('AI:tests AI:longthread AI:synth AI:agents AI:qbr*') +// await t.step('run tests', async () => { +// const content = `run tests in ./${simplePath}` +// await actions.run({ content, path: synthPath, actorId }) +// }) + +// }) +// Deno.test.ignore('test controller', async (t) => { +// await using cradle = await cradleMaker() + +// await backchat.write(synthPath, synth) +// await backchat.write(assessorPath, assessor) +// await backchat.write(simplePath, simple) + +// const actions = await backchat.actions('longthread') +// await t.step('test count', async () => { +// const content = `how many tests in ./${simplePath} ?` +// await actions.run({ content, path: synthPath, actorId }) +// }) +// log.enable('AI:tests AI:longthread AI:synth AI:agents AI:qbr*') +// await t.step('run tests', async () => { +// const content = `run tests in ./${simplePath}` +// await actions.run({ content, path: synthPath, actorId }) +// }) + +// }) + +// export const cradleMaker = async () => { +// const engine = await Engine.provision(superuserKey, aesKey) +// const backchat = await Backchat.upsert(engine, privateKey) +// return { backchat, engine } +// } + +// // first thing to do is generate a full tps report diff --git a/_import-artifact/tests/test-file-runner.test.ts b/_import-artifact/tests/test-file-runner.test.ts new file mode 100644 index 000000000000..a7fb4c3d94b2 --- /dev/null +++ b/_import-artifact/tests/test-file-runner.test.ts @@ -0,0 +1,168 @@ +import { expect, log } from '@utils' +import { type Api } from '@/isolates/longthread.ts' +import { + actorId, + fileRunnerPath, + firstTestPath, + fixture, + 
meetingTestPath, + routerTestPath, + testFixturePath, + testRunnterTestPath, +} from '@/tests/fixtures/fixture.ts' +import { TestFile } from '@/api/tps-report.ts' +import { addBranches } from '@/constants.ts' + +Deno.test.ignore('test file runner', async (t) => { + // cannot seem to get this test to pass without flickering + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + + const opts = { branchName: 'runner', noClose: true } + const { drone } = await backchat.actions('longthread', opts) + const target = addBranches(backchat.pid, opts.branchName) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = firstTestPath + + const result = await drone({ + path, + content, + actorId, + stopOnTools: ['utils_resolve', 'utils_reject'], + }) + expect(result?.functionName).toBe('utils_resolve') + expect(result?.args).toEqual({}) + + const tpsPath = firstTestPath.replace('.test.md', '.tps.json') + const tps = await backchat.readJSON(tpsPath, target) + log('done', tps) + expect(tps).toBeTruthy() + expect(tps.summary.completed).toBe(2) + expect(tps.cases).toHaveLength(1) + expect(tps.cases[0].iterations).toHaveLength(2) + expect(tps.cases[0].iterations[0].outcomes).toHaveLength(4) + expect(tps.cases[0].iterations[1].outcomes).toHaveLength(4) + }) +}) +Deno.test.ignore('router', async (t) => { + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner AI:completions', + // ) + + const opts = { branchName: 'runner', noClose: true } + const { start } = await backchat.actions('longthread', opts) + await start({}) + const target = addBranches(backchat.pid, opts.branchName) + const { run } = await 
backchat.actions('longthread', { target }) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = 'run ' + routerTestPath + await run({ path, content, actorId }) + + const tpsPath = routerTestPath.replace('.test.md', '.tps.json') + const tps = await backchat.readJSON(tpsPath, target) + expect(tps).toBeTruthy() + expect(tps.summary.completed).toBe(1) + expect(tps.cases).toHaveLength(5) + expect(tps.cases[0].iterations).toHaveLength(1) + }) +}) +Deno.test.ignore('test meeting bot', async (t) => { + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + const opts = { branchName: 'runner', noClose: true } + const { drone } = await backchat.actions('longthread', opts) + const target = addBranches(backchat.pid, opts.branchName) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = meetingTestPath + + const result = await drone({ + path, + content, + actorId, + stopOnTools: ['utils_resolve', 'utils_reject'], + }) + expect(result?.functionName).toBe('utils_resolve') + expect(result?.args).toEqual({}) + + const tpsPath = meetingTestPath.replace('.test.md', '.tps.json') + const tps = await backchat.readJSON(tpsPath, target) + log('done', tps) + expect(tps).toBeTruthy() + expect(tps.summary.completed).toBe(1) + expect(tps.cases).toHaveLength(1) + expect(tps.cases[0].iterations).toHaveLength(1) + expect(tps.cases[0].iterations[0].outcomes).toHaveLength(1) + }) +}) +Deno.test.ignore('test fixture', async (t) => { + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + + const opts = { branchName: 'runner', noClose: true } + const { drone } = await 
backchat.actions('longthread', opts) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = testFixturePath + + const result = await drone({ + path, + content, + actorId, + stopOnTools: ['utils_resolve', 'utils_reject'], + }) + expect(result?.functionName).toBe('utils_resolve') + expect(result?.args).toEqual({}) + + const tpsPath = testFixturePath.replace('.test.md', '.tps.json') + const target = addBranches(backchat.pid, opts.branchName) + const tps = await backchat.readJSON(tpsPath, target) + log('done', tps) + }) +}) +Deno.test.ignore('test the tester', async (t) => { + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + // log.enable( + // 'AI:tests AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller AI:utils AI:test-case-runner', + // ) + + const opts = { branchName: 'runner', noClose: true } + const { drone } = await backchat.actions('longthread', opts) + + await t.step('run', async () => { + const path = fileRunnerPath + const content = testRunnterTestPath + + const result = await drone({ + path, + content, + actorId, + stopOnTools: ['utils_resolve', 'utils_reject'], + }) + expect(result?.functionName).toBe('utils_resolve') + expect(result?.args).toEqual({}) + + const tpsPath = testRunnterTestPath.replace('.test.md', '.tps.json') + const target = addBranches(backchat.pid, opts.branchName) + const tps = await backchat.readJSON(tpsPath, target) + log('done', tps) + }) +}) diff --git a/_import-artifact/tests/test-requester.test.ts b/_import-artifact/tests/test-requester.test.ts new file mode 100644 index 000000000000..c1ca92f474e4 --- /dev/null +++ b/_import-artifact/tests/test-requester.test.ts @@ -0,0 +1,51 @@ +import { log } from '@utils' +import { type Api } from '@/isolates/longthread.ts' +import { + actorId, + firstTestPath, + fixture, + requesterPath, +} from '@/tests/fixtures/fixture.ts' + +Deno.test.ignore('test-requester', async (t) => { + log('test-requester') + // 
test interrogating the test suites + // run multiple test files + // interrupt a running test + // be notified when a test completes + + // at the start, can only do full awaiting of the test to complete + + // but now we need to merge back up to parent + + // OR we need to ensure that the controller receives the files from our branch + await using cradle = await fixture(t, import.meta.url) + const { backchat } = cradle + const target = await backchat.threadPID() + const { run } = await backchat.actions('longthread', { target }) + + // log.enable( + // 'AI:tests AI:longthread AI:execute-tools AI:agents AI:qbr* AI:test-registry AI:test-controller', + // ) + await t.step('test count', async () => { + const content = `how many test cases in ./${firstTestPath} ?` + await run({ content, path: requesterPath, actorId }) + }) + // await t.step('test description', async () => { + // const content = `name all the test cases in ./${firstTestPath}` + // await actions.run({ content, path: requesterPath, actorId }) + // }) + // await t.step('list all', async () => { + // const content = `list all test files in tests/fixtures` + // await actions.run({ content, path: requesterPath, actorId }) + // }) + await t.step('run all', async () => { + const content = `run the first test file in ./tests` + await run({ content, path: requesterPath, actorId }) + }) + + // test running multiple files + // test summarizing multiple tps reports +}) + +// first thing to do is generate a full tps report diff --git a/_import-artifact/tests/tps-reports.test.ts b/_import-artifact/tests/tps-reports.test.ts new file mode 100644 index 000000000000..9c9f82f4d4ba --- /dev/null +++ b/_import-artifact/tests/tps-reports.test.ts @@ -0,0 +1,105 @@ +import { + addCase, + addIteration, + create, + type TestController, + testController, + type TestFile, + testFile, +} from '@/api/tps-report.ts' +import { expect } from '@utils' +import { delay } from '@std/async' +const raw: TestFile = { + summary: { + timestamp: 
Date.now(), + path: 'tests/test-example.test.md', + hash: '29426920ac614d1672d6f2dfcdd22c0052c22e32', + target: 'the/agent/to/test.md', + assessor: 'agents/assessor.md', + elapsed: 1000, + iterations: 5, + completed: 4, + }, + cases: [ + { + summary: { + name: 'test name 1', + timestamp: Date.now(), + elapsed: 50, + iterations: 3, + dependencies: [], + promptLists: [['prompt1'], ['multiple prompts', 'prompt2']], + expectations: ['expectation 1', 'expectation 2'], + completed: 1, + successes: [1, 0], + }, + iterations: [ + { + commit: '29426920ac614d1672d6f2dfcdd22c0052c22e32', + prompts: ['prompt used'], + outcomes: [{ + reasoning: [], + outcome: true, + }, { + reasoning: [], + outcome: false, + }], + }, + ], + }, + ], +} + +Deno.test('tps report', async () => { + const tpsReport = testFile.parse(raw) + expect(tpsReport).toEqual(raw) + + const created = create( + 'tests/test-example.test.md', + 'the/agent/to/test.md', + 'agents/assessor.md', + 5, + '29426920ac614d1672d6f2dfcdd22c0052c22e32', + ) + expect(created.cases).toHaveLength(0) + + const added = addCase(created, 'test name', [['prompt']], ['e1', 'e2'], []) + expect(added.cases).toHaveLength(1) + expect(added.cases[0].summary.expectations).toHaveLength(2) + + await delay(1) + + const updated = addIteration(added, 0, 0, { + commit: '29426920ac614d1672d6f2dfcdd22c0052c22e32', + prompts: ['prompt used'], + outcomes: [{ + reasoning: [], + outcome: true, + }, { + reasoning: [], + outcome: false, + }], + }) + expect(updated.summary.completed).toEqual(1) + expect(updated.summary.elapsed).toBeGreaterThan(0) + expect(updated.cases[0].summary.completed).toEqual(1) + expect(updated.cases[0].summary.elapsed).toBeGreaterThan(0) +}) + +Deno.test('dependencies', () => { + let tpsReport = testFile.parse(raw) + expect(tpsReport).toEqual(raw) + + tpsReport = addCase(tpsReport, 'test name', [['prompt']], ['e1'], [0]) + addCase(tpsReport, 'test name', [['prompt']], ['e1'], [0, 1]) +}) + +const controller: TestController = { + 
globs: ['tests/*.test.md'], + files: [{ path: 'tests/test-example.test.md', status: 'pending' }], + concurrency: 2, +} +Deno.test('controller', () => { + const parsed = testController.parse(controller) + expect(parsed).toEqual(controller) +}) diff --git a/_import-artifact/utils.ts b/_import-artifact/utils.ts new file mode 100644 index 000000000000..c846bb4e65a4 --- /dev/null +++ b/_import-artifact/utils.ts @@ -0,0 +1,54 @@ +export { default as equal } from 'fast-deep-equal/es6' +export { delay } from '@std/async' +export { expect } from '@std/expect' +export { assert, AssertionError } from '@std/assert' +export { default as merge } from 'lodash.merge' +import Debug from 'debug' +import { deserializeError, serializeError } from 'serialize-error' +export { deserializeError, serializeError } +export { Debug } +const _log = Debug('AI:tests') +export const log = (...args: unknown[]) => { + _log(...args) +} +log.enable = (...args: string[]) => { + Debug.enable(...args) +} +export * as posix from '@std/path/posix' + +const isDenoDeploy = Deno.env.get('DENO_DEPLOYMENT_ID') !== undefined +let _isTestMode = false +export const isKvTestMode = () => { + return _isTestMode +} +export const openKv = async () => { + if (isDenoDeploy) { + return Deno.openKv() + } + const KEY = 'DENO_KV_PATH' + let path = ':memory:' + const permission = await Deno.permissions.query({ + name: 'env', + variable: KEY, + }) + if (permission.state === 'granted') { + const env = Deno.env.get(KEY) + if (env) { + path = env + } + } + log('open kv', path) + _isTestMode = path === ':memory:' + return Deno.openKv(path) +} +const getDebug = () => { + if (isDenoDeploy) { + const string = Deno.env.get('DEBUG') + if (string) { + return string + } + } + return '' +} + +Debug.enable(getDebug())