diff --git a/experiments/project-review.sh b/experiments/project-review.sh new file mode 100755 index 0000000..c805d7e --- /dev/null +++ b/experiments/project-review.sh @@ -0,0 +1,6 @@ +find ./src -name '*.rs' | sort -r | while read -r file; do + echo "Processing $file" + echo "File: $file" >> all_outputs.txt + cat "$file" | target/debug/pipe-gpt -p "Can this code be improved for efficiency? Be very concise." >> all_outputs.txt +done +cat all_outputs.txt | target/debug/pipe-gpt --markdown -p "Provide the top 10 improvements from the above suggestions." diff --git a/experiments/scripts-review.sh b/experiments/scripts-review.sh new file mode 100644 index 0000000..1b8a738 --- /dev/null +++ b/experiments/scripts-review.sh @@ -0,0 +1,4 @@ +### pipe-gpt for en masse single-file code reviews +find ./src -name '*.rs' | while read -r file; do cat "$file" | pipe-gpt --markdown -p "Can this code be improved for efficiency?"; done + +find ./src -name '*.rs' | while read -r file; do cat "$file" | pipe-gpt --markdown -p "Can this code be improved for readability?"; done diff --git a/src/api/openai.rs b/src/api/openai.rs index 07176f8..ba06bc8 100644 --- a/src/api/openai.rs +++ b/src/api/openai.rs @@ -10,8 +10,6 @@ use openai_api_rust::{ }; use regex::Regex; -use crate::config::models::MAX_TOKENS; - pub enum AssistantPurpose { CodeReviewer, Default, diff --git a/src/cli/parse.rs b/src/cli/parse.rs index 8435fb1..de95422 100644 --- a/src/cli/parse.rs +++ b/src/cli/parse.rs @@ -150,7 +150,7 @@ mod tests { let input = "Test".to_string(); let (chat_body, render_markdown) = parse_arguments(&input, command); - assert_eq!(chat_body.model, "gpt-4"); + assert_eq!(chat_body.model, "gpt-4o"); assert_eq!(chat_body.max_tokens.unwrap(), *MAX_TOKENS); assert_eq!(chat_body.temperature.unwrap(), *TEMPERATURE); assert_eq!(render_markdown, false); diff --git a/src/config/models.rs b/src/config/models.rs index 7265a5d..2a1c506 100644 --- a/src/config/models.rs +++ b/src/config/models.rs @@ -1,5 +1,5 
@@ -/// Defines which gpt model to use. Currently set to "gpt-4" -pub const MODEL: &str = "gpt-4"; +/// Defines which gpt model to use. Currently set to "gpt-4o" +pub const MODEL: &str = "gpt-4o"; /// Defines default maximum number of tokens available in conversation and response pub const MAX_TOKENS: &i32 = &4096; /// Defines default temperature of response