Compare commits

...

349 Commits

Author SHA1 Message Date
Paul Gauthier
c62dde4c9c set version to 0.66.1.dev 2024-12-01 09:15:58 -08:00
Paul Gauthier
3fef1babcb version bump to 0.66.0 2024-12-01 09:14:21 -08:00
Paul Gauthier
16af1751a6 copy 2024-12-01 06:09:11 -08:00
Paul Gauthier
b0e138952e libportaudio2 2024-12-01 06:06:42 -08:00
Paul Gauthier (aider)
1271c037ef ci: add PortAudio system dependency for Ubuntu tests 2024-12-01 06:06:03 -08:00
Paul Gauthier
36b59ba617 copy 2024-12-01 06:04:49 -08:00
Paul Gauthier (aider)
b671db7108 fix: correct sounddevice module mocking in voice tests 2024-11-30 19:15:58 -08:00
Paul Gauthier (aider)
9304b80b69 fix: update sounddevice mocking in voice tests 2024-11-30 19:15:40 -08:00
Paul Gauthier (aider)
03c2964364 style: Format code with black and sort imports 2024-11-30 19:15:20 -08:00
Paul Gauthier (aider)
49c78f2797 test: update voice tests to mock audio dependencies properly 2024-11-30 19:15:16 -08:00
Paul Gauthier
1a8d112055 copy 2024-11-30 18:38:40 -08:00
Paul Gauthier (aider)
7b193d693f docs: clarify Control-Up/Down for message history navigation 2024-11-30 18:23:34 -08:00
Paul Gauthier (aider)
e8fa5c36c2 feat: add ctrl-up/down bindings for history navigation 2024-11-30 18:21:47 -08:00
Paul Gauthier (aider)
a6e162c37a refactor: remove unused Mock import from test_voice.py 2024-11-30 15:43:18 -08:00
Paul Gauthier (aider)
9c55b7a317 style: Remove extra blank line in test_voice.py 2024-11-30 15:43:13 -08:00
Paul Gauthier (aider)
4ef4e8cd72 test: remove test_record_and_transcribe test case 2024-11-30 15:43:10 -08:00
Paul Gauthier (aider)
e9942737c6 style: remove trailing whitespace in test_voice.py 2024-11-30 15:42:45 -08:00
Paul Gauthier (aider)
5c208dba41 fix: improve voice test mocking to handle async prompt behavior 2024-11-30 15:42:42 -08:00
Paul Gauthier (aider)
ba032ce60e fix: update voice tests to match expected behavior 2024-11-30 15:42:12 -08:00
Paul Gauthier (aider)
2fe0dda8af style: remove unused tempfile import from test_voice.py 2024-11-30 15:41:38 -08:00
Paul Gauthier (aider)
97daff4a10 style: format test_voice.py with black and sort imports 2024-11-30 15:41:19 -08:00
Paul Gauthier (aider)
caeceb58a5 test: add voice input device tests with mocked hardware dependencies 2024-11-30 15:41:15 -08:00
Paul Gauthier
3739d48e52 test: add basic voice tests 2024-11-30 15:41:11 -08:00
Paul Gauthier
858fdb02ec copy 2024-11-30 14:05:51 -08:00
Paul Gauthier (aider)
b0cbb071e6 style: fix line wrapping in update-history.py 2024-11-30 14:01:25 -08:00
Paul Gauthier (aider)
f222739b0d fix: preserve older entries when updating HISTORY.md 2024-11-30 14:01:21 -08:00
Paul Gauthier (aider)
a280f5d5b2 style: remove trailing whitespace in update-history.py 2024-11-30 13:59:57 -08:00
Paul Gauthier (aider)
cef333a01c feat: extract history content up to end of base version section 2024-11-30 13:59:51 -08:00
Paul Gauthier (aider)
24bd016a1a style: Fix whitespace in update-history script 2024-11-30 13:59:09 -08:00
Paul Gauthier (aider)
3c44f824cb refactor: optimize HISTORY.md processing to work with relevant portion only 2024-11-30 13:59:05 -08:00
Paul Gauthier
3ee78ef557 skip ph.shutdown for faster exit 2024-11-30 13:29:39 -08:00
Paul Gauthier
d9b1bcbdcc remove no-op vscode detection 2024-11-30 13:25:13 -08:00
Paul Gauthier
283dd0b1f1 test: refactor read-only file path handling in command tests 2024-11-30 13:18:49 -08:00
Paul Gauthier (aider)
56345ddef9 test: add cross-platform test for relative path handling in drop command 2024-11-30 13:15:19 -08:00
Paul Gauthier (aider)
4631008f8c style: fix whitespace in commands.py 2024-11-30 13:12:37 -08:00
Paul Gauthier (aider)
e2ebde75be fix: improve /drop handling of relative paths with samefile check 2024-11-30 13:12:31 -08:00
Paul Gauthier
91613fcbf7 copy 2024-11-30 13:07:58 -08:00
Paul Gauthier
0b279143cc copy 2024-11-30 13:03:28 -08:00
Paul Gauthier
ade4847c61 feat: add timeout configuration for litellm requests 2024-11-30 13:03:20 -08:00
Paul Gauthier (aider)
a3e7203331 refactor: change timeout argument type from float to int 2024-11-30 13:00:24 -08:00
Paul Gauthier (aider)
bbabd38a48 feat: add timeout option for API calls 2024-11-30 12:59:39 -08:00
Paul Gauthier
eb996c1e42 copy 2024-11-30 12:19:55 -08:00
Paul Gauthier
bf0ad4cda1 test: simplify browser flag test by removing analytics flags 2024-11-30 12:18:18 -08:00
Paul Gauthier (aider)
c32af6536a ci: add fetch-depth: 0 to all checkout actions 2024-11-30 12:16:34 -08:00
Paul Gauthier (aider)
2437f9b051 chore: update actions/checkout to v4 in pages workflow 2024-11-30 12:16:07 -08:00
Paul Gauthier
0b781174bd ci: remove redundant AIDER_ANALYTICS_LOG environment variable 2024-11-30 11:37:33 -08:00
Paul Gauthier
f448d3e2db chore: add pytest-env and remove debug dump from tests 2024-11-30 11:35:40 -08:00
Paul Gauthier (aider)
6b4222ec48 test: add verification for pytest.ini environment variables 2024-11-30 11:32:49 -08:00
Paul Gauthier
3f770cc5c0 test: remove analytics env vars and add debug dump 2024-11-30 11:32:48 -08:00
Paul Gauthier (aider)
14902f5b9a ci: add analytics env vars to GitHub Actions workflows 2024-11-30 11:25:39 -08:00
Paul Gauthier (aider)
efdeb13bf5 test: disable analytics during test execution 2024-11-30 11:24:46 -08:00
Paul Gauthier
62558291b6 test: disable analytics in test environment 2024-11-30 10:59:46 -08:00
Paul Gauthier
edb0120bbf test: add analytics flags to browser test 2024-11-30 10:58:06 -08:00
Paul Gauthier
0dbaec553f refactor: Improve version handling and cleanup version-related files 2024-11-30 10:10:03 -08:00
Paul Gauthier
295040c94c feat: add exit event tracking for Control-C and /exit commands 2024-11-30 09:53:44 -08:00
Paul Gauthier
c1c4193b1d feat: add event tracking for message sending lifecycle 2024-11-30 09:49:07 -08:00
Paul Gauthier
d17f0a9e1f copy 2024-11-30 09:45:17 -08:00
Paul Gauthier
871030dadb fix: normalize path in cmd_drop test for cross-platform compatibility 2024-11-30 09:36:21 -08:00
Paul Gauthier
0ee9b74c0f Merge branch 'main' of github.com:Aider-AI/aider 2024-11-30 09:33:06 -08:00
paul-gauthier
82929f650c Merge pull request #2497 from preynal/main
feat: ability to select audio input device
2024-11-30 09:33:00 -08:00
Paul Gauthier
37b31c46bd copy 2024-11-30 09:19:22 -08:00
Paul Gauthier
c682bd858a copy 2024-11-30 09:19:01 -08:00
Paul Gauthier
2439891ee0 better handle __version__ errors 2024-11-30 09:18:17 -08:00
Philippe de Reynal
23825cafe7 fix options diff + missing docs 2024-11-30 11:33:10 +01:00
Philippe de Reynal
a8f6f1fce2 Merge branch 'Aider-AI:main' into main 2024-11-30 11:24:58 +01:00
Philippe de Reynal
e11faadf39 feat: ability to select audio input device 2024-11-30 11:24:34 +01:00
Paul Gauthier
2c7251f4b9 copy 2024-11-29 09:23:57 -08:00
Paul Gauthier
fb9f18fc9c copy 2024-11-29 09:23:14 -08:00
Paul Gauthier (aider)
93cd3d0d8b feat: add git diff output to history update script 2024-11-29 09:23:01 -08:00
Paul Gauthier
ed2479ea82 refactor: consolidate analytics exit events in main function 2024-11-29 09:20:52 -08:00
Paul Gauthier (aider)
accde0bfd0 feat: add analytics events before each return statement in main() 2024-11-29 09:17:16 -08:00
Paul Gauthier
7b34d9e4f4 copy 2024-11-28 15:53:00 -08:00
Paul Gauthier (aider)
6af71951af style: fix whitespace in benchmark.py 2024-11-28 14:01:50 -08:00
Paul Gauthier (aider)
3eed45dc3e fix: improve benchmark directory selection based on latest .md file timestamp 2024-11-28 14:01:45 -08:00
Paul Gauthier (aider)
320b059bc7 perf: optimize benchmark dir search by filtering on timestamp first 2024-11-28 14:00:12 -08:00
Paul Gauthier
a89ce06377 fix: correct glob pattern for finding latest benchmark directory 2024-11-28 14:00:10 -08:00
Paul Gauthier
e4a1d6fe89 copy 2024-11-28 11:21:37 -08:00
Paul Gauthier
93c625bb81 Merge branch 'main' of github.com:Aider-AI/aider 2024-11-28 11:19:19 -08:00
Paul Gauthier
87f84fb82d copy 2024-11-28 11:18:30 -08:00
paul-gauthier
ce9e76a7dc Merge pull request #2489 from itlackey/benchmark/20241127
Benchmark/20241127
2024-11-28 11:01:54 -08:00
Paul Gauthier
4f3f1c5e23 better 2024-11-28 10:51:53 -08:00
itlackey
22076d401f Added ollama/granite3-dense:8b 2024-11-28 10:49:51 -06:00
itlackey
79ea82f147 Updated ollama/qwen2.5-coder:32b, added ollama/tulu3 2024-11-28 10:49:50 -06:00
Paul Gauthier
3785415632 copy 2024-11-27 17:49:35 -08:00
Paul Gauthier
35eda73ad1 Merge branch 'main' of github.com:Aider-AI/aider 2024-11-27 17:49:22 -08:00
Paul Gauthier (aider)
c8282fc8d3 style: fix whitespace in test_commands.py 2024-11-27 17:39:00 -08:00
Paul Gauthier (aider)
5030881934 test: add tests for /drop command with and without glob patterns 2024-11-27 17:38:54 -08:00
Paul Gauthier (aider)
bc1e3a7059 style: format list comprehension to single line 2024-11-27 17:35:11 -08:00
Paul Gauthier (aider)
ba17dceb4d feat: make /drop use substring matching for non-glob patterns 2024-11-27 17:35:05 -08:00
paul-gauthier
62109f4ab1 Merge pull request #2480 from itlackey/benchmark/ollama-qwen2.5-14b 2024-11-27 16:51:50 -08:00
Paul Gauthier
d54fbd6592 copy 2024-11-27 15:23:13 -08:00
Paul Gauthier
a1f5bfb746 copy 2024-11-27 09:36:17 -08:00
Paul Gauthier
3ab1018c66 copy 2024-11-27 09:29:35 -08:00
Paul Gauthier
62e96372fa bumped deps to pickup grep-ast 0.4.1 and disable Dart 2024-11-27 09:29:22 -08:00
Paul Gauthier
945d10f554 add event for repo size 2024-11-27 09:27:23 -08:00
Paul Gauthier
f44e5ae5f9 cleanup 2024-11-27 07:46:47 -08:00
Paul Gauthier
a67b665846 copy 2024-11-27 07:37:33 -08:00
Paul Gauthier
8fb23b414c rename 2024-11-27 07:18:21 -08:00
Paul Gauthier (aider)
f5100626a8 refactor: update language file extensions and paths in test code 2024-11-27 07:18:04 -08:00
Paul Gauthier (aider)
9ab46fade7 style: fix linting issues in test_repomap.py 2024-11-27 07:11:49 -08:00
Paul Gauthier (aider)
2ce01b157b refactor: simplify language test map and read from fixture files 2024-11-27 07:11:45 -08:00
itlackey
f714e42e11 Added qwen2.5-coder:14b running via ollama 2024-11-27 09:11:33 -06:00
Paul Gauthier (aider)
447b7af573 refactor: use fixture file for Java test instead of embedded code 2024-11-27 07:10:38 -08:00
Paul Gauthier (aider)
ec2b635a1a refactor: use fixture files for C, C++, and Elixir test content 2024-11-27 07:09:28 -08:00
Paul Gauthier (aider)
7465b4bf91 refactor: move TypeScript and TSX test code to fixture files 2024-11-27 07:06:50 -08:00
Paul Gauthier (aider)
4580fac6fa refactor: move test code samples to fixture files 2024-11-27 07:05:36 -08:00
Paul Gauthier (aider)
8c218e9edc refactor: move remaining language examples to fixture files 2024-11-27 07:05:10 -08:00
Paul Gauthier (aider)
44ceb8f1a0 style: Fix import order and add proper line spacing 2024-11-27 07:04:29 -08:00
Paul Gauthier (aider)
642c1c50fb refactor: move remaining test examples to fixture files 2024-11-27 07:04:23 -08:00
Paul Gauthier (aider)
4de8c25a3f refactor: move language examples to fixture files 2024-11-27 07:03:20 -08:00
Paul Gauthier
565f08a8e9 refactor: clean up test cases and remove redundant language examples 2024-11-27 07:03:18 -08:00
Paul Gauthier (aider)
a85ae206c9 refactor: add initial language test fixtures for C, C++, Elixir and Java 2024-11-27 06:59:58 -08:00
Paul Gauthier (aider)
9e9b5e8d46 feat: enhance language test snippets with comprehensive examples 2024-11-27 06:58:15 -08:00
Paul Gauthier
b623141a8f refactor: reorder test language cases in repomap tests 2024-11-27 06:58:13 -08:00
Paul Gauthier (aider)
631cdc37c4 test: enhance C# test case with more language features and symbols 2024-11-27 06:56:16 -08:00
Paul Gauthier
8d50bc0ef1 fix: correct key symbol in TypeScript test from UserGreeting to UserProps 2024-11-27 06:56:15 -08:00
Paul Gauthier (aider)
ae395fbb8f test: enhance TSX test fixture with more symbols and hooks 2024-11-27 06:54:37 -08:00
Paul Gauthier (aider)
10877a99f1 test: enhance Rust test snippet with trait and struct examples 2024-11-27 06:53:41 -08:00
Paul Gauthier (aider)
0faff91c72 test: enhance Python test case with class and type annotations 2024-11-27 06:52:31 -08:00
Paul Gauthier (aider)
00f79fecd0 test: enhance OCaml test case with module and type definitions 2024-11-27 06:51:53 -08:00
Paul Gauthier (aider)
203128d935 test: enhance Java test case with interface and implementation 2024-11-27 06:51:22 -08:00
Paul Gauthier
4f6e52aed0 test: add assertion for minimum result length in repomap test 2024-11-27 06:51:20 -08:00
Paul Gauthier (aider)
7bc7b2e3da style: Fix line length violations in test_repomap.py 2024-11-27 06:48:17 -08:00
Paul Gauthier (aider)
27f0ca3b08 feat: enhance JavaScript test snippet with class and module exports 2024-11-27 06:48:12 -08:00
Paul Gauthier
2337b2bb3e test: refactor repo map language test to run each language separately 2024-11-27 06:48:10 -08:00
Paul Gauthier (aider)
48ea13e130 style: format code with black and add trailing commas 2024-11-27 06:23:25 -08:00
Paul Gauthier (aider)
5c73ab26c0 test: add key symbol checks for each language parser 2024-11-27 06:23:21 -08:00
Paul Gauthier
947e4ce71d copy 2024-11-26 20:43:09 -08:00
Paul Gauthier (aider)
200295e3ee fix: add error handling for input history file permissions 2024-11-26 20:41:59 -08:00
Paul Gauthier (aider)
ded5fe5ec0 style: remove trailing whitespace in analytics.py 2024-11-26 20:37:34 -08:00
Paul Gauthier (aider)
7dffa943fa fix: handle analytics file access errors gracefully 2024-11-26 20:37:29 -08:00
Paul Gauthier
f702f67e27 copy 2024-11-26 20:28:48 -08:00
Paul Gauthier
a64956406d fix: improve PDF support detection and update model suggestions 2024-11-26 20:26:44 -08:00
Paul Gauthier
9c8bde2cff copy 2024-11-26 20:13:33 -08:00
Paul Gauthier
70a282ebf1 copy 2024-11-26 20:11:12 -08:00
Paul Gauthier (aider)
415652d38e style: reorder subprocess args to keep cwd consistent 2024-11-26 19:55:07 -08:00
Paul Gauthier (aider)
1a745e4fa9 fix: set cwd to repo root for shell commands in cmd_run 2024-11-26 19:53:09 -08:00
Paul Gauthier (aider)
973e86df27 fix: use cwd=root when executing shell commands 2024-11-26 19:52:46 -08:00
Paul Gauthier (aider)
1f2917681f feat: add cwd parameter to run_cmd for directory control 2024-11-26 19:52:06 -08:00
Paul Gauthier (aider)
28d1feacf5 refactor: simplify image file processing to use provided filenames directly 2024-11-26 19:49:53 -08:00
Paul Gauthier (aider)
cf4ef8605d style: remove trailing whitespace in base_coder.py 2024-11-26 19:48:48 -08:00
Paul Gauthier (aider)
4be3728273 feat: add image file support for read-only files 2024-11-26 19:48:42 -08:00
Paul Gauthier (aider)
34b8c3f47c fix: add dummy message to prevent empty cur_messages in test 2024-11-26 19:46:51 -08:00
Paul Gauthier (aider)
309893fd1e test: verify image file presence in LLM messages for read-only command 2024-11-26 19:45:01 -08:00
Paul Gauthier
705eb06e8d copy 2024-11-26 17:28:15 -08:00
Paul Gauthier
5cfcf255e9 fix: override PDF support detection for Claude 3.5 Sonnet 2024-11-26 17:27:05 -08:00
Paul Gauthier
b8f36c8277 feat: add PDF file support and refactor image handling 2024-11-26 17:19:28 -08:00
Paul Gauthier (aider)
73c1dc697f style: remove trailing whitespace in base_coder.py 2024-11-26 17:16:50 -08:00
Paul Gauthier (aider)
a9c4647461 feat: add support for both images and PDFs based on model capabilities 2024-11-26 17:16:43 -08:00
Paul Gauthier
aaeaa24153 feat: add PDF support and improve image handling in chat messages 2024-11-26 17:16:41 -08:00
Paul Gauthier
b2232cda7b refactor: modify check_for_urls to return modified input string 2024-11-26 15:04:19 -08:00
Paul Gauthier (aider)
3ba4aca268 refactor: improve error handling for .gitignore file operations 2024-11-26 15:01:50 -08:00
Paul Gauthier (aider)
f45533e20b style: fix line length and whitespace in gitignore handling 2024-11-26 15:00:38 -08:00
Paul Gauthier (aider)
2e7c5d6cfa fix: handle permission error when writing to .gitignore 2024-11-26 15:00:33 -08:00
Paul Gauthier
476acc7715 copy 2024-11-26 14:23:18 -08:00
Paul Gauthier
ab3b50296c copy 2024-11-26 14:12:16 -08:00
Paul Gauthier
60c29b2839 copy 2024-11-26 13:36:48 -08:00
Paul Gauthier
7972f5f4bc copy 2024-11-26 12:37:58 -08:00
Paul Gauthier
83a6865eb3 set version to 0.65.2.dev 2024-11-26 12:37:24 -08:00
Paul Gauthier
29a9b650ed version bump to 0.65.1 2024-11-26 12:35:54 -08:00
Paul Gauthier
dd48b740f9 test: update Claude model name tests to use 3.5 version 2024-11-26 12:33:58 -08:00
Paul Gauthier
401ce7a63d copy 2024-11-26 12:32:08 -08:00
Paul Gauthier
8218d085f7 fix: update Claude 3 model aliases to 3.5 versions 2024-11-26 12:31:54 -08:00
Paul Gauthier (aider)
554d274fff style: fix string quote consistency in blame.py 2024-11-26 10:47:04 -08:00
Paul Gauthier (aider)
13318219db feat: add YAML update capability to blame.py for --all-since 2024-11-26 10:46:55 -08:00
Paul Gauthier
61759f984c copy 2024-11-26 09:16:46 -08:00
Paul Gauthier
a73e77a819 set version to 0.65.1.dev 2024-11-26 08:57:36 -08:00
Paul Gauthier
b5f1659382 version bump to 0.65.0 2024-11-26 08:55:57 -08:00
Paul Gauthier
beb6722f57 copy 2024-11-26 08:04:17 -08:00
Paul Gauthier
a052b89152 copy 2024-11-26 07:42:16 -08:00
Paul Gauthier
95b391350f bumped deps 2024-11-26 07:41:31 -08:00
Paul Gauthier
5773eac03f copy 2024-11-26 07:37:56 -08:00
Paul Gauthier (aider)
da4ace2875 style: Fix string quote consistency in update-history.py 2024-11-26 07:31:36 -08:00
Paul Gauthier (aider)
e507c5b502 feat: add aider contribution percentage to history updates 2024-11-26 07:31:32 -08:00
Paul Gauthier (aider)
722c2c2668 style: fix linting errors in test_repomap.py 2024-11-26 07:28:02 -08:00
Paul Gauthier (aider)
8e7bfef9f1 test: add Dart language support to repo map tests 2024-11-26 07:27:57 -08:00
Paul Gauthier
1811f0d0d5 Merge branch 'main' of github.com:Aider-AI/aider 2024-11-26 07:27:32 -08:00
paul-gauthier
6b9d534fe2 Merge pull request #2236 from malkoG/support-dart
Add dart support
2024-11-26 07:26:43 -08:00
Paul Gauthier (aider)
dc4562a845 feat: add docs update step after aider in update-history script 2024-11-26 07:10:32 -08:00
Paul Gauthier
c15af63bc9 copy 2024-11-26 07:10:03 -08:00
Paul Gauthier
5b68c2c7d9 fix test 2024-11-26 07:09:49 -08:00
Paul Gauthier (aider)
b70e0bd1f6 test: update invalid edit format test to check return code instead of SystemExit 2024-11-26 07:07:11 -08:00
Paul Gauthier (aider)
743f0f5540 test: add test for invalid edit format handling 2024-11-26 07:06:28 -08:00
Paul Gauthier (aider)
e647a5b733 feat: add tests for UnknownEditFormat exception handling 2024-11-26 07:04:43 -08:00
Paul Gauthier (aider)
df1d259e42 style: Fix linting issues in test_coder.py 2024-11-26 07:04:21 -08:00
Paul Gauthier (aider)
e3efab7fbf test: add UnknownEditFormat exception tests 2024-11-26 07:04:15 -08:00
Paul Gauthier (aider)
cd79f7f4b0 fix: import UnknownEditFormat exception from base_coder module 2024-11-26 07:03:26 -08:00
Paul Gauthier (aider)
5d175745bf fix: handle None values in valid_formats list generation 2024-11-26 07:02:34 -08:00
Paul Gauthier (aider)
e648bac74b fix: remove circular import of UnknownEditFormat in base_coder.py 2024-11-26 07:01:55 -08:00
Paul Gauthier (aider)
e5c0ebd0a0 style: fix import order in base_coder.py 2024-11-26 07:01:21 -08:00
Paul Gauthier (aider)
608a43402c refactor: move UnknownEditFormat exception to base_coder.py 2024-11-26 07:01:14 -08:00
Paul Gauthier (aider)
75bc2dd564 feat: add custom UnknownEditFormat exception with documentation link 2024-11-26 07:00:16 -08:00
Paul Gauthier
bbd81c3cf7 copy 2024-11-26 06:56:36 -08:00
Paul Gauthier
301eb7c74d copy 2024-11-26 06:50:18 -08:00
Paul Gauthier
a756039f27 copy 2024-11-26 06:49:28 -08:00
Paul Gauthier (aider)
b4d1b71ee7 test: add test for skipping duplicate basename file mentions 2024-11-26 06:46:53 -08:00
Paul Gauthier (aider)
8d85a4754d style: fix line wrapping in base_coder.py 2024-11-26 06:46:08 -08:00
Paul Gauthier (aider)
a1b48049a9 feat: skip files with duplicate basenames in file mentions 2024-11-26 06:46:01 -08:00
Paul Gauthier
6789844c1f copy 2024-11-26 06:42:42 -08:00
Paul Gauthier
c602a839ca better update history script 2024-11-26 06:42:36 -08:00
Paul Gauthier
01c7793e90 revert changes to get_ident_filename_matches() 2024-11-26 06:41:08 -08:00
Paul Gauthier (aider)
47b013b034 docs: update changelog with recent improvements and features 2024-11-26 06:38:59 -08:00
Paul Gauthier
dc2047804a docs: update changelog with URL detection and model alias features 2024-11-26 06:38:57 -08:00
Paul Gauthier (aider)
885e5cbd7c style: fix whitespace in base_coder.py 2024-11-26 06:33:37 -08:00
Paul Gauthier (aider)
635a5196e8 fix: exclude files with same name as existing chat or read-only files 2024-11-26 06:33:29 -08:00
Paul Gauthier
cbd339190b refactor: simplify file name matching logic in get_all_relative_files 2024-11-26 06:33:28 -08:00
Paul Gauthier (aider)
266093189d style: fix linting issues in get_ident_filename_matches 2024-11-26 06:26:26 -08:00
Paul Gauthier (aider)
62b02d4370 fix: remove incorrect file name filtering in get_ident_filename_matches 2024-11-26 06:25:15 -08:00
Paul Gauthier (aider)
8546a1dc86 docs: add main branch changes to release history 2024-11-26 06:25:00 -08:00
Paul Gauthier
ae98bf237f docs: remove unreleased changes from history file 2024-11-26 06:24:59 -08:00
Paul Gauthier (aider)
0398deb005 test: add tests for filename matching with existing and read-only files 2024-11-26 06:24:11 -08:00
Paul Gauthier (aider)
3a1492977b fix: skip suggesting files with same name as existing chat files 2024-11-26 06:22:57 -08:00
Paul Gauthier (aider)
ef40a456e8 docs: add main branch section with latest changes to history 2024-11-26 06:20:04 -08:00
Paul Gauthier
583b78c0c1 docs: update release history to remove unreleased changes 2024-11-26 06:20:03 -08:00
Paul Gauthier (aider)
c0988de581 docs: update release history with v0.64.1 and v0.64.2 changes 2024-11-26 06:17:06 -08:00
Paul Gauthier
34a190e29b docs: remove v0.64.2 release notes 2024-11-26 06:17:04 -08:00
Paul Gauthier (aider)
0940598708 docs: add v0.64.2 release notes to history 2024-11-26 06:12:13 -08:00
Paul Gauthier (aider)
7e6cbb3efa style: format update-history.py with black 2024-11-26 06:10:07 -08:00
Paul Gauthier (aider)
7dc4e00c75 feat: dynamically determine base version for history updates 2024-11-26 06:10:03 -08:00
Paul Gauthier (aider)
8a598eacaf style: format update-history.py with black 2024-11-26 06:09:44 -08:00
Paul Gauthier (aider)
8592fad9cd feat: add script to automate history updates from git diffs 2024-11-26 06:09:41 -08:00
Paul Gauthier
fa72a89d35 feat: add script to update history file 2024-11-26 06:09:38 -08:00
Paul Gauthier
5e99c51d93 copy 2024-11-26 06:01:46 -08:00
Paul Gauthier (aider)
905976e765 feat: add verbose and trace flags for Jekyll debugging 2024-11-26 05:59:52 -08:00
Paul Gauthier
3ebd47d3db copy 2024-11-25 21:12:01 -08:00
Paul Gauthier
bf4c7c475a copy 2024-11-25 21:08:27 -08:00
Paul Gauthier (aider)
bf38371971 test: add test cases for model name aliases 2024-11-25 21:06:29 -08:00
Paul Gauthier
18460f4f91 copy 2024-11-25 21:05:59 -08:00
Paul Gauthier (aider)
f94e3e6aba feat: add cog script to auto-generate model aliases docs 2024-11-25 21:03:35 -08:00
Paul Gauthier
1647da2942 docs: add frontmatter to model aliases documentation 2024-11-25 21:03:34 -08:00
Paul Gauthier (aider)
86e2cdb1fb docs: add documentation for model aliases configuration 2024-11-25 21:01:11 -08:00
Paul Gauthier
764702a377 docs: add model aliases configuration documentation 2024-11-25 21:01:10 -08:00
Paul Gauthier (aider)
837a97ffdf style: fix string quote consistency in alias split 2024-11-25 20:56:10 -08:00
Paul Gauthier (aider)
217e9b96d8 feat: add command-line model alias support via --alias flag 2024-11-25 20:56:04 -08:00
Paul Gauthier (aider)
524274fcf4 refactor: consolidate model aliases into central dictionary 2024-11-25 20:54:24 -08:00
Paul Gauthier
6d5f576b92 refactor: simplify model aliases to only include Claude-3 variants 2024-11-25 20:54:23 -08:00
Paul Gauthier (aider)
445f9fa7df feat: add model name aliases with canonical mapping 2024-11-25 20:51:37 -08:00
Paul Gauthier (aider)
2ff3a23606 fix: add num_ctx parameter to run_test_real function 2024-11-25 19:21:08 -08:00
Paul Gauthier (aider)
c5ce57ea7f style: fix linting issues in benchmark.py 2024-11-25 19:20:49 -08:00
Paul Gauthier (aider)
351b8e50f0 feat: add --num-ctx flag to override model context window size 2024-11-25 19:20:43 -08:00
Paul Gauthier
68be6c5742 copy 2024-11-25 19:11:18 -08:00
Paul Gauthier
7a34a2dfa9 ask 2.5% of users to opt-in to analytics 2024-11-25 19:09:59 -08:00
Paul Gauthier
49ce9e1209 copy 2024-11-25 18:42:28 -08:00
Paul Gauthier (aider)
c84e192324 style: remove extra blank line in test_coder.py 2024-11-25 18:42:09 -08:00
Paul Gauthier (aider)
d696673f07 test: remove shell command testing from URL detection test 2024-11-25 18:42:03 -08:00
Paul Gauthier (aider)
2957d463c9 test: add assertion for suggest_shell_commands flag 2024-11-25 18:41:21 -08:00
Paul Gauthier (aider)
af48e50898 test: add tests for URL detection functionality in Coder class 2024-11-25 18:39:49 -08:00
Paul Gauthier (aider)
f4b964a4b8 test: add tests for --[no-]detect-urls CLI option 2024-11-25 18:39:10 -08:00
Paul Gauthier
a79ce7a151 fix: remove duplicate detect_urls parameter and add to coder initialization 2024-11-25 18:38:34 -08:00
Paul Gauthier (aider)
21bb83c55a fix: remove duplicate detect_urls parameter in Coder class 2024-11-25 18:36:25 -08:00
Paul Gauthier (aider)
7122ceb16c feat: add --detect-urls flag to control URL detection behavior 2024-11-25 18:34:05 -08:00
Paul Gauthier
f9bcfe341c ignore .gitattributes 2024-11-25 15:09:46 -08:00
Paul Gauthier
13c5bfdd88 copy 2024-11-25 09:05:52 -08:00
Paul Gauthier
bf79c2cb99 copy 2024-11-25 08:28:42 -08:00
Paul Gauthier
325cdfcf57 refactor: Update Ollama model detection and context window documentation 2024-11-24 16:14:34 -08:00
Paul Gauthier
7d14d4ade9 copy 2024-11-24 15:23:47 -08:00
Paul Gauthier
1b7d12194e copy 2024-11-24 15:23:42 -08:00
Paul Gauthier (aider)
91f238aded style: Organize imports and remove unused import 2024-11-24 15:20:47 -08:00
Paul Gauthier (aider)
78ff489995 feat: Add ModelSettings import to main.py 2024-11-24 15:20:43 -08:00
Paul Gauthier (aider)
ff791439e2 fix: Import fields and ModelSettings to resolve flake8 errors 2024-11-24 15:20:35 -08:00
Paul Gauthier
3f8b2d6b99 refactor: Improve model info output with detailed settings display 2024-11-24 15:20:24 -08:00
Paul Gauthier
4dcbce58ed copy 2024-11-24 14:55:02 -08:00
Paul Gauthier
0427deb897 copy 2024-11-24 14:54:19 -08:00
Paul Gauthier
f3eb3409e3 copy 2024-11-24 14:54:12 -08:00
Paul Gauthier
86619052ca copy 2024-11-24 14:52:01 -08:00
Paul Gauthier
0c59d3234e copy 2024-11-24 12:03:21 -08:00
Paul Gauthier
939d7ea3fb copy 2024-11-24 12:02:48 -08:00
Paul Gauthier
dc8761763d copy 2024-11-24 07:56:12 -08:00
Paul Gauthier
4894914db1 copy 2024-11-24 07:50:19 -08:00
Paul Gauthier
aee94a0584 copy 2024-11-24 07:14:09 -08:00
Paul Gauthier (aider)
c550422168 feat: Add x-axis label "Provider: quantization" to chart 2024-11-24 07:13:14 -08:00
Paul Gauthier (aider)
cebd9cabb3 feat: Add real-time filtering to chart based on search input 2024-11-24 07:12:39 -08:00
Paul Gauthier (aider)
3f16652d56 feat: Escape colons in model names with quotes in YAML file 2024-11-24 07:06:41 -08:00
Paul Gauthier
2ebf48ca71 feat: Update quant.yml with refined model descriptions and new test results 2024-11-24 07:06:39 -08:00
Paul Gauthier
c2f184f5bb copy 2024-11-24 06:21:58 -08:00
Paul Gauthier
e56651e5c0 copy 2024-11-23 20:27:43 -08:00
Paul Gauthier
1d09e96127 copy 2024-11-23 20:19:11 -08:00
Paul Gauthier
73de0ea8be copy 2024-11-23 20:18:19 -08:00
Paul Gauthier
757eac0579 copy 2024-11-23 18:39:20 -08:00
Paul Gauthier
bb78e2f57f default Ollama num_ctx to 8k 2024-11-23 18:14:30 -08:00
Paul Gauthier
92579243c5 copy 2024-11-23 18:13:58 -08:00
Paul Gauthier (aider)
8d0ba40d67 feat: Sort chart data by pass rate in descending order 2024-11-23 17:48:34 -08:00
Paul Gauthier
ff8c1aace9 copy 2024-11-23 15:32:09 -08:00
Paul Gauthier
cf74dc9b48 copy 2024-11-23 15:30:28 -08:00
Paul Gauthier (aider)
e63df83091 refactor: Consolidate subprocess.call mock in test_pipe_editor 2024-11-23 14:23:26 -08:00
Paul Gauthier
14522dbbcd copy 2024-11-23 14:20:58 -08:00
Paul Gauthier
91daea9e01 simplify if 2024-11-23 14:15:36 -08:00
Paul Gauthier (aider)
12b789fc4e fix: Handle Mixpanel connection errors by disabling tracking 2024-11-23 14:14:11 -08:00
Paul Gauthier
baa13351a6 refactor: Improve model registration and settings handling 2024-11-23 12:57:24 -08:00
Paul Gauthier
8f83204f0f Merge branch 'main' of github.com:Aider-AI/aider 2024-11-23 12:37:26 -08:00
paul-gauthier
80f5b60e1d Merge pull request #2440 from ivanfioravanti/main
MLX 4bit and 8bit diff added
2024-11-23 12:36:18 -08:00
Paul Gauthier
54525f6696 Merge branch 'main' of github.com:Aider-AI/aider 2024-11-23 12:35:36 -08:00
Ivan Fioravanti
3dc50216b5 Merge branch 'Aider-AI:main' into main 2024-11-23 19:52:23 +01:00
ivanfioravanti
324430a696 quant.yml mlx-community/Qwen2.5-Coder-32B-Instruct-8bit added 2024-11-23 19:51:48 +01:00
paul-gauthier
65d7957610 Update modes.md 2024-11-23 09:36:43 -08:00
paul-gauthier
6ac4993cf2 Update modes.md 2024-11-23 09:36:26 -08:00
ivanfioravanti
100744a952 Article updated to reflect change in mlx test 2024-11-23 17:34:01 +01:00
ivanfioravanti
3a331e55dc mlx 4bit diff 2024-11-23 17:32:08 +01:00
Paul Gauthier
a57f81ba5f copy 2024-11-23 07:02:58 -08:00
Paul Gauthier
ca0b55fbbf copy 2024-11-23 06:53:54 -08:00
Paul Gauthier
38a3cf98dd gemini 2024-11-22 19:45:52 -08:00
Paul Gauthier
1234ad92e5 gemini-exp-1114 2024-11-22 19:43:26 -08:00
Paul Gauthier
488edc24ce gemini-exp-1121 2024-11-22 19:42:11 -08:00
Paul Gauthier
307c23631a copy 2024-11-22 17:20:38 -08:00
Paul Gauthier
83d2241883 copy 2024-11-22 16:40:47 -08:00
Paul Gauthier
f9126416e8 copy 2024-11-22 16:38:02 -08:00
Paul Gauthier
4e9ae16cb3 copy 2024-11-22 15:42:10 -08:00
Paul Gauthier
ef8bfdffa7 Merge branch 'main' of github.com:Aider-AI/aider 2024-11-22 15:41:27 -08:00
paul-gauthier
bfb090331f Merge pull request #2432 from ivanfioravanti/main 2024-11-22 15:40:42 -08:00
Paul Gauthier
5506d0f25b fixed xai docs 2024-11-22 14:21:00 -08:00
Ivan Fioravanti
6ebd2d0883 mlx Qwen2.5-Coder-32B-Instruct-4bit added to quant blog
mlx Qwen2.5-Coder-32B-Instruct-4bit added to quant blog
2024-11-22 22:24:13 +01:00
Paul Gauthier
a16dcaba4e copy 2024-11-22 11:38:14 -08:00
Paul Gauthier
f06452c6c5 copy 2024-11-22 10:56:33 -08:00
Paul Gauthier (aider)
6a0a97cb41 feat: Add host.docker.internal gateway to enable Ollama server access from container 2024-11-22 10:07:47 -08:00
Paul Gauthier
711102b438 copy 2024-11-22 09:39:23 -08:00
Paul Gauthier (aider)
6d53eb0aaa style: Increase canvas height for taller graph aspect ratio 2024-11-22 07:40:46 -08:00
Paul Gauthier
0ccf04a2c5 copy 2024-11-22 07:32:25 -08:00
Paul Gauthier
070ce35b44 copy 2024-11-22 07:32:00 -08:00
Paul Gauthier
a8296e5de5 copy 2024-11-22 07:29:50 -08:00
Paul Gauthier (aider)
d17f25e975 feat: Add interactive table with quantization model performance data 2024-11-22 07:17:23 -08:00
Paul Gauthier
23095ada85 docs: Update Qwen 2.5 Coder 32B Instruct model comparison with Hyperbolic labs API 2024-11-22 07:16:01 -08:00
Paul Gauthier
f9ef161991 copy 2024-11-22 07:07:41 -08:00
Paul Gauthier
28004bae2f copy 2024-11-22 07:06:43 -08:00
Paul Gauthier
17aef7be7d copy 2024-11-22 06:09:32 -08:00
Paul Gauthier
ebba8f5110 fix ollama models included in quant blog 2024-11-22 06:01:01 -08:00
Paul Gauthier
dbd7f51f5c fix ollama models included in quant blog 2024-11-22 05:56:03 -08:00
Paul Gauthier
fbadfcfa7c refactor: Remove unnecessary input logging and history tracking in pipe_editor command 2024-11-21 18:37:51 -08:00
Paul Gauthier
2c12234604 refactor: Update input handling to set and use placeholder text 2024-11-21 18:34:24 -08:00
Paul Gauthier (aider)
13cb6a315c style: Apply linter formatting to io.py 2024-11-21 18:32:03 -08:00
Paul Gauthier (aider)
48e7376002 feat: Add placeholder functionality to input prompts 2024-11-21 18:31:57 -08:00
Paul Gauthier
60d82eddee added Qwen2 72B Instruct to over time 2024-11-21 16:49:24 -08:00
Paul Gauthier (aider)
30ee89c7e9 style: Fix linting issues in over_time.py 2024-11-21 16:45:11 -08:00
Paul Gauthier (aider)
25bcea6aec feat: Add print of model release dates and names in sorted order 2024-11-21 16:45:07 -08:00
Paul Gauthier
488c88da91 update over time graphs 2024-11-21 14:19:31 -08:00
Paul Gauthier (aider)
8fdcd92260 feat: Update plot save paths to website assets directory 2024-11-21 14:19:05 -08:00
Paul Gauthier
781a40df52 fix: Update Gemini Pro legend label to Gemini 1.5 Pro 2024-11-21 14:19:03 -08:00
Paul Gauthier
2412c81d92 copy 2024-11-21 14:09:34 -08:00
Paul Gauthier (aider)
a7fc0f9d2e feat: Add color and legend support for Gemini Pro models 2024-11-21 14:02:27 -08:00
Paul Gauthier
9eab021a50 refactor: Modify offer_url method and release notes handling in main 2024-11-21 14:02:01 -08:00
Paul Gauthier (aider)
c189a52e5e style: Organize imports and apply linter formatting 2024-11-21 14:00:24 -08:00
Paul Gauthier (aider)
6d6d763dd3 refactor: Restructure benchmark plotting script for improved maintainability 2024-11-21 14:00:20 -08:00
Paul Gauthier
3cfbaa0ed6 copy 2024-11-21 13:07:18 -08:00
Paul Gauthier
e1b4571fdf set version to 0.64.2.dev 2024-11-21 12:58:23 -08:00
Paul Gauthier
08027ea9c4 version bump to 0.64.1 2024-11-21 12:56:49 -08:00
Paul Gauthier
d0528a00c1 copy 2024-11-21 12:55:10 -08:00
Paul Gauthier
7ae6a2ba9a disable o1 streaming on openrouter 2024-11-21 12:54:23 -08:00
Paul Gauthier
ad0e5c4770 copy 2024-11-21 12:53:21 -08:00
malkoG
a8a3e2401b Update languages.md for dart support 2024-11-04 13:22:22 +09:00
malkoG
a7f59a2e2b Add tags.scm for dart 2024-11-04 12:38:27 +09:00
92 changed files with 4351 additions and 2303 deletions

View File

@@ -24,6 +24,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3

View File

@@ -12,6 +12,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3

View File

@@ -36,7 +36,9 @@ jobs:
working-directory: aider/website
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:

View File

@@ -12,6 +12,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5

View File

@@ -25,12 +25,19 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y libportaudio2
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -38,5 +45,7 @@ jobs:
pip install .
- name: Run tests
env:
AIDER_ANALYTICS: false
run: |
pytest

View File

@@ -25,6 +25,8 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
@@ -38,6 +40,8 @@ jobs:
pip install .
- name: Run tests
env:
AIDER_ANALYTICS: false
run: |
pytest

4
.gitignore vendored
View File

@@ -11,4 +11,6 @@ _site
.jekyll-cache/
.jekyll-metadata
aider/__version__.py
.venv/
aider/_version.py
.venv/
.gitattributes

View File

@@ -1,8 +1,46 @@
# Release history
### main branch
- PDF support for Sonnet and Gemini models.
- Added `--voice-input-device` to select audio input device for voice recording, by @preynal.
- Added `--timeout` option to configure API call timeouts.
- Set cwd to repo root when running shell commands.
- Added Ctrl-Up/Down keyboard shortcuts for per-message history navigation.
- Improved error handling for failed .gitignore file operations.
- Improved error handling for input history file permissions.
- Improved error handling for analytics file access.
- Removed spurious warning about disabling pretty in VSCode.
- Removed broken support for Dart.
- Bugfix when scraping URLs found in chat messages.
- Better handling of __version__ import errors.
- Improved `/drop` command to support substring matching for non-glob patterns.
- Aider wrote 82% of the code in this release.
### Aider v0.65.1
- Bugfix to `--alias`.
### Aider v0.65.0
- Added `--alias` config to define [custom model aliases](https://aider.chat/docs/config/model-aliases.html).
- Added `--[no-]detect-urls` flag to disable detecting and offering to scrape URLs found in the chat.
- Ollama models now default to an 8k context window.
- Added [RepoMap support for Dart language](https://aider.chat/docs/languages.html) by @malkoG.
- Ask 2.5% of users if they want to opt-in to [analytics](https://aider.chat/docs/more/analytics.html).
- Skip suggesting files that share names with files already in chat.
- `/editor` returns and prefill the file content into the prompt, so you can use `/editor` to compose messages that start with `/commands`, etc.
- Enhanced error handling for analytics.
- Improved handling of UnknownEditFormat exceptions with helpful documentation links.
- Bumped dependencies to pick up grep-ast 0.4.0 for Dart language support.
- Aider wrote 81% of the code in this release.
### Aider v0.64.1
- Disable streaming for o1 on OpenRouter.
### Aider v0.64.0
- Added [`/editor` command](https://aider.chat/docs/usage/commands.html) to open system editor for writing prompts, by @thehunmonkgroup.
- Full support for `gpt-4o-2024-11-20`.
- Stream o1 models by default.

View File

@@ -1,6 +1,20 @@
from packaging import version
__version__ = "0.66.1.dev"
safe_version = __version__
try:
from aider.__version__ import __version__
from aider._version import __version__
except Exception:
__version__ = "0.64.1.dev"
__version__ = safe_version + "+import"
if type(__version__) is not str:
__version__ = safe_version + "+type"
else:
try:
if version.parse(__version__) < version.parse(safe_version):
__version__ = safe_version + "+less"
except Exception:
__version__ = safe_version + "+parse"
__all__ = [__version__]

View File

@@ -5,7 +5,7 @@ import time
import uuid
from pathlib import Path
from mixpanel import Mixpanel
from mixpanel import Mixpanel, MixpanelException
from posthog import Posthog
from aider import __version__
@@ -78,7 +78,7 @@ class Analytics:
if not self.user_id:
return False
PERCENT = 1
PERCENT = 2.5
return self.is_uuid_in_percentage(self.user_id, PERCENT)
def is_uuid_in_percentage(self, uuid_str, percent):
@@ -105,9 +105,14 @@ class Analytics:
return uuid_str[:6] <= threshold
def get_data_file_path(self):
data_file = Path.home() / ".aider" / "analytics.json"
data_file.parent.mkdir(parents=True, exist_ok=True)
return data_file
try:
data_file = Path.home() / ".aider" / "analytics.json"
data_file.parent.mkdir(parents=True, exist_ok=True)
return data_file
except OSError:
# If we can't create/access the directory, just disable analytics
self.disable(permanently=False)
return None
def get_or_create_uuid(self):
self.load_data()
@@ -119,6 +124,9 @@ class Analytics:
def load_data(self):
data_file = self.get_data_file_path()
if not data_file:
return
if data_file.exists():
try:
data = json.loads(data_file.read_text())
@@ -130,14 +138,20 @@ class Analytics:
def save_data(self):
data_file = self.get_data_file_path()
if not data_file:
return
data = dict(
uuid=self.user_id,
permanently_disable=self.permanently_disable,
asked_opt_in=self.asked_opt_in,
)
# Allow exceptions; crash if we can't record permanently_disabled=True, etc
data_file.write_text(json.dumps(data, indent=4))
try:
data_file.write_text(json.dumps(data, indent=4))
except OSError:
# If we can't write the file, just disable analytics
self.disable(permanently=False)
def get_system_info(self):
return {
@@ -159,7 +173,7 @@ class Analytics:
return None
def event(self, event_name, main_model=None, **kwargs):
if not (self.mp or self.ph) and not self.logfile:
if not self.mp and not self.ph and not self.logfile:
return
properties = {}
@@ -182,7 +196,10 @@ class Analytics:
properties["aider_version"] = __version__
if self.mp:
self.mp.track(self.user_id, event_name, dict(properties))
try:
self.mp.track(self.user_id, event_name, dict(properties))
except MixpanelException:
self.mp = None # Disable mixpanel on connection errors
if self.ph:
self.ph.capture(self.user_id, event_name, dict(properties))
@@ -197,7 +214,3 @@ class Analytics:
with open(self.logfile, "a") as f:
json.dump(log_entry, f)
f.write("\n")
def __del__(self):
if self.ph:
self.ph.shutdown()

View File

@@ -193,12 +193,24 @@ def get_parser(default_config_files, git_root):
default=".aider.model.metadata.json",
help="Specify a file with context window and costs for unknown models",
)
group.add_argument(
"--alias",
action="append",
metavar="ALIAS:MODEL",
help="Add a model alias (can be used multiple times)",
)
group.add_argument(
"--verify-ssl",
action=argparse.BooleanOptionalAction,
default=True,
help="Verify the SSL cert when connecting to models (default: True)",
)
group.add_argument(
"--timeout",
type=int,
default=None,
help="Timeout in seconds for API calls (default: None)",
)
group.add_argument(
"--edit-format",
"--chat-mode",
@@ -553,7 +565,7 @@ def get_parser(default_config_files, git_root):
group.add_argument(
"--test",
action="store_true",
help="Run tests and fix problems found",
help="Run tests, fix problems found and then exit",
default=False,
)
@@ -738,6 +750,12 @@ def get_parser(default_config_files, git_root):
default=True,
help="Enable/disable fancy input with history and completion (default: True)",
)
group.add_argument(
"--detect-urls",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable detection and offering to add URLs to chat (default: True)",
)
group.add_argument(
"--editor",
help="Specify which editor to use for the /editor command",
@@ -758,6 +776,12 @@ def get_parser(default_config_files, git_root):
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
group.add_argument(
"--voice-input-device",
metavar="VOICE_INPUT_DEVICE",
default=None,
help="Specify the input device name for voice recording",
)
return parser

View File

@@ -37,6 +37,15 @@ from ..dump import dump # noqa: F401
from .chat_chunks import ChatChunks
class UnknownEditFormat(ValueError):
def __init__(self, edit_format, valid_formats):
self.edit_format = edit_format
self.valid_formats = valid_formats
super().__init__(
f"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}"
)
class MissingAPIKeyError(ValueError):
pass
@@ -91,6 +100,7 @@ class Coder:
cache_warming_thread = None
num_cache_warming_pings = 0
suggest_shell_commands = True
detect_urls = True
ignore_mentions = None
chat_language = None
@@ -156,7 +166,12 @@ class Coder:
res.original_kwargs = dict(kwargs)
return res
raise ValueError(f"Unknown edit format {edit_format}")
valid_formats = [
str(c.edit_format)
for c in coders.__all__
if hasattr(c, "edit_format") and c.edit_format is not None
]
raise UnknownEditFormat(edit_format, valid_formats)
def clone(self, **kwargs):
new_coder = Coder.create(from_coder=self, **kwargs)
@@ -267,6 +282,7 @@ class Coder:
num_cache_warming_pings=0,
suggest_shell_commands=True,
chat_language=None,
detect_urls=True,
):
# Fill in a dummy Analytics if needed, but it is never .enable()'d
self.analytics = analytics if analytics is not None else Analytics()
@@ -280,6 +296,7 @@ class Coder:
self.ignore_mentions = set()
self.suggest_shell_commands = suggest_shell_commands
self.detect_urls = detect_urls
self.num_cache_warming_pings = num_cache_warming_pings
@@ -648,6 +665,8 @@ class Coder:
def get_readonly_files_messages(self):
readonly_messages = []
# Handle non-image files
read_only_content = self.get_read_only_files_content()
if read_only_content:
readonly_messages += [
@@ -659,6 +678,15 @@ class Coder:
content="Ok, I will use these files as references.",
),
]
# Handle image files
images_message = self.get_images_message(self.abs_read_only_fnames)
if images_message is not None:
readonly_messages += [
images_message,
dict(role="assistant", content="Ok, I will use these images as references."),
]
return readonly_messages
def get_chat_files_messages(self):
@@ -680,7 +708,7 @@ class Coder:
dict(role="assistant", content=files_reply),
]
images_message = self.get_images_message()
images_message = self.get_images_message(self.abs_fnames)
if images_message is not None:
chat_files_messages += [
images_message,
@@ -689,23 +717,42 @@ class Coder:
return chat_files_messages
def get_images_message(self):
if not self.main_model.info.get("supports_vision"):
def get_images_message(self, fnames):
supports_images = self.main_model.info.get("supports_vision")
supports_pdfs = self.main_model.info.get("supports_pdf_input") or self.main_model.info.get(
"max_pdf_size_mb"
)
# https://github.com/BerriAI/litellm/pull/6928
supports_pdfs = supports_pdfs or "claude-3-5-sonnet-20241022" in self.main_model.name
if not (supports_images or supports_pdfs):
return None
image_messages = []
for fname, content in self.get_abs_fnames_content():
if is_image_file(fname):
with open(fname, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
mime_type, _ = mimetypes.guess_type(fname)
if mime_type and mime_type.startswith("image/"):
image_url = f"data:{mime_type};base64,{encoded_string}"
rel_fname = self.get_rel_fname(fname)
image_messages += [
{"type": "text", "text": f"Image file: {rel_fname}"},
{"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
]
for fname in fnames:
if not is_image_file(fname):
continue
mime_type, _ = mimetypes.guess_type(fname)
if not mime_type:
continue
with open(fname, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
image_url = f"data:{mime_type};base64,{encoded_string}"
rel_fname = self.get_rel_fname(fname)
if mime_type.startswith("image/") and supports_images:
image_messages += [
{"type": "text", "text": f"Image file: {rel_fname}"},
{"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
]
elif mime_type == "application/pdf" and supports_pdfs:
image_messages += [
{"type": "text", "text": f"PDF file: {rel_fname}"},
{"type": "image_url", "image_url": image_url},
]
if not image_messages:
return None
@@ -724,6 +771,7 @@ class Coder:
self.lint_outcome = None
self.test_outcome = None
self.shell_commands = []
self.message_cost = 0
if self.repo:
self.commit_before_message.append(self.repo.get_head_commit_sha())
@@ -767,7 +815,7 @@ class Coder:
return self.commands.run(inp)
self.check_for_file_mentions(inp)
self.check_for_urls(inp)
inp = self.check_for_urls(inp)
return inp
@@ -812,9 +860,11 @@ class Coder:
def check_for_urls(self, inp: str) -> List[str]:
"""Check input for URLs and offer to add them to the chat."""
if not self.detect_urls:
return inp
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*[^\s,.])")
urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates
added_urls = []
group = ConfirmGroup(urls)
for url in urls:
if url not in self.rejected_urls:
@@ -824,11 +874,10 @@ class Coder:
):
inp += "\n\n"
inp += self.commands.cmd_web(url, return_content=True)
added_urls.append(url)
else:
self.rejected_urls.add(url)
return added_urls
return inp
def keyboard_interrupt(self):
now = time.time()
@@ -836,6 +885,7 @@ class Coder:
thresh = 2 # seconds
if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:
self.io.tool_warning("\n\n^C KeyboardInterrupt")
self.event("exit", reason="Control-C")
sys.exit()
self.io.tool_warning("\n\n^C again to exit")
@@ -1059,7 +1109,7 @@ class Coder:
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
# Add the reminder prompt if we still have room to include it.
if (
max_input_tokens is None
not max_input_tokens
or total_tokens < max_input_tokens
and self.gpt_prompts.system_reminder
):
@@ -1137,6 +1187,8 @@ class Coder:
return chunks
def send_message(self, inp):
self.event("message_send_starting")
self.cur_messages += [
dict(role="user", content=inp),
]
@@ -1216,6 +1268,7 @@ class Coder:
lines = traceback.format_exception(type(err), err, err.__traceback__)
self.io.tool_warning("".join(lines))
self.io.tool_error(str(err))
self.event("message_send_exception", exception=str(err))
return
finally:
if self.mdstream:
@@ -1350,9 +1403,7 @@ class Coder:
res.append("- Ask for smaller changes in each request.")
res.append("- Break your code into smaller source files.")
if "diff" not in self.main_model.edit_format:
res.append(
"- Use a stronger model like gpt-4o, sonnet or opus that can return diffs."
)
res.append("- Use a stronger model that can return diffs.")
if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:
res.append("")
@@ -1405,9 +1456,18 @@ class Coder:
addable_rel_fnames = self.get_addable_relative_files()
# Get basenames of files already in chat or read-only
existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {
os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames
}
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
# Skip files that share a basename with files already in chat
if os.path.basename(rel_fname) in existing_basenames:
continue
normalized_rel_fname = rel_fname.replace("\\", "/")
normalized_words = set(word.replace("\\", "/") for word in words)
if normalized_rel_fname in normalized_words:
@@ -2057,7 +2117,7 @@ class Coder:
self.io.tool_output(f"Running {command}")
# Add the command to input history
self.io.add_to_input_history(f"/run {command.strip()}")
exit_status, output = run_cmd(command, error_print=self.io.tool_error)
exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)
if output:
accumulated_output += f"Output from {command}\n{output}\n"

View File

@@ -808,15 +808,33 @@ class Commands:
# Expand tilde in the path
expanded_word = os.path.expanduser(word)
# Handle read-only files separately, without glob_filtered_to_repo
read_only_matched = [f for f in self.coder.abs_read_only_fnames if expanded_word in f]
# Handle read-only files with substring matching and samefile check
read_only_matched = []
for f in self.coder.abs_read_only_fnames:
if expanded_word in f:
read_only_matched.append(f)
continue
if read_only_matched:
for matched_file in read_only_matched:
self.coder.abs_read_only_fnames.remove(matched_file)
self.io.tool_output(f"Removed read-only file {matched_file} from the chat")
# Try samefile comparison for relative paths
try:
abs_word = os.path.abspath(expanded_word)
if os.path.samefile(abs_word, f):
read_only_matched.append(f)
except (FileNotFoundError, OSError):
continue
matched_files = self.glob_filtered_to_repo(expanded_word)
for matched_file in read_only_matched:
self.coder.abs_read_only_fnames.remove(matched_file)
self.io.tool_output(f"Removed read-only file {matched_file} from the chat")
# For editable files, use glob if word contains glob chars, otherwise use substring
if any(c in expanded_word for c in "*?[]"):
matched_files = self.glob_filtered_to_repo(expanded_word)
else:
# Use substring matching like we do for read-only files
matched_files = [
self.coder.get_rel_fname(f) for f in self.coder.abs_fnames if expanded_word in f
]
if not matched_files:
matched_files.append(expanded_word)
@@ -876,7 +894,7 @@ class Commands:
def cmd_run(self, args, add_on_nonzero_exit=False):
"Run a shell command and optionally add the output to the chat (alias: !)"
exit_status, combined_output = run_cmd(
args, verbose=self.verbose, error_print=self.io.tool_error
args, verbose=self.verbose, error_print=self.io.tool_error, cwd=self.coder.root
)
if combined_output is None:
@@ -904,11 +922,12 @@ class Commands:
def cmd_exit(self, args):
"Exit the application"
self.coder.event("exit", reason="/exit")
sys.exit()
def cmd_quit(self, args):
"Exit the application"
sys.exit()
self.cmd_exit(args)
def cmd_ls(self, args):
"List all known files and indicate which are included in the chat session"
@@ -1080,7 +1099,9 @@ class Commands:
self.io.tool_error("To use /voice you must provide an OpenAI API key.")
return
try:
self.voice = voice.Voice(audio_format=self.args.voice_format)
self.voice = voice.Voice(
audio_format=self.args.voice_format, device_name=self.args.voice_input_device
)
except voice.SoundDeviceError:
self.io.tool_error(
"Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?"
@@ -1365,9 +1386,8 @@ class Commands:
"Open an editor to write a prompt"
user_input = pipe_editor(initial_content, suffix="md", editor=self.editor)
self.io.user_input(user_input, log_only=False)
self.io.add_to_input_history(user_input)
return user_input
if user_input.strip():
self.io.set_placeholder(user_input.rstrip())
def expand_subdir(file_path):

View File

@@ -198,6 +198,7 @@ class InputOutput:
editingmode=EditingMode.EMACS,
fancy_input=True,
):
self.placeholder = None
self.never_prompts = set()
self.editingmode = editingmode
no_color = os.environ.get("NO_COLOR")
@@ -417,6 +418,16 @@ class InputOutput:
"Ignore Ctrl when pressing space bar"
event.current_buffer.insert_text(" ")
@kb.add("c-up")
def _(event):
"Navigate backward through history"
event.current_buffer.history_backward()
@kb.add("c-down")
def _(event):
"Navigate forward through history"
event.current_buffer.history_forward()
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
@@ -427,8 +438,13 @@ class InputOutput:
try:
if self.prompt_session:
# Use placeholder if set, then clear it
default = self.placeholder or ""
self.placeholder = None
line = self.prompt_session.prompt(
show,
default=default,
completer=completer_instance,
reserve_space_for_menu=4,
complete_style=CompleteStyle.MULTI_COLUMN,
@@ -486,10 +502,13 @@ class InputOutput:
def add_to_input_history(self, inp):
if not self.input_history_file:
return
FileHistory(self.input_history_file).append_string(inp)
# Also add to the in-memory history if it exists
if self.prompt_session and self.prompt_session.history:
self.prompt_session.history.append_string(inp)
try:
FileHistory(self.input_history_file).append_string(inp)
# Also add to the in-memory history if it exists
if self.prompt_session and self.prompt_session.history:
self.prompt_session.history.append_string(inp)
except OSError as err:
self.tool_warning(f"Unable to write to input history file: {err}")
def get_input_history(self):
if not self.input_history_file:
@@ -536,11 +555,11 @@ class InputOutput:
hist = "\n" + content.strip() + "\n\n"
self.append_chat_history(hist)
def offer_url(self, url, prompt="Open URL for more info?"):
def offer_url(self, url, prompt="Open URL for more info?", allow_never=True):
"""Offer to open a URL in the browser, returns True if opened."""
if url in self.never_prompts:
return False
if self.confirm_ask(prompt, subject=url, allow_never=True):
if self.confirm_ask(prompt, subject=url, allow_never=allow_never):
webbrowser.open(url)
return True
return False
@@ -739,6 +758,10 @@ class InputOutput:
self.console.print(show_resp)
def set_placeholder(self, placeholder):
"""Set a one-time placeholder text for the next input prompt."""
self.placeholder = placeholder
def print(self, message=""):
print(message)

View File

@@ -49,11 +49,11 @@ class Linter:
try:
process = subprocess.Popen(
cmd,
cwd=self.root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=self.encoding,
errors="replace",
cwd=self.root,
)
except OSError as err:
print(f"Unable to execute lint command: {err}")
@@ -152,12 +152,12 @@ class Linter:
try:
result = subprocess.run(
flake8_cmd,
cwd=self.root,
capture_output=True,
text=True,
check=False,
encoding=self.encoding,
errors="replace",
cwd=self.root,
)
errors = result.stdout + result.stderr
except Exception as e:

View File

@@ -6,6 +6,7 @@ import sys
import threading
import traceback
import webbrowser
from dataclasses import fields
from pathlib import Path
import git
@@ -17,11 +18,13 @@ from aider import __version__, models, urls, utils
from aider.analytics import Analytics
from aider.args import get_parser
from aider.coders import Coder
from aider.coders.base_coder import UnknownEditFormat
from aider.commands import Commands, SwitchCoder
from aider.format_settings import format_settings, scrub_sensitive_info
from aider.history import ChatSummary
from aider.io import InputOutput
from aider.llm import litellm # noqa: F401; properly init litellm on launch
from aider.models import ModelSettings
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.report import report_uncaught_exceptions
from aider.versioncheck import check_version, install_from_main_branch, install_upgrade
@@ -154,13 +157,17 @@ def check_gitignore(git_root, io, ask=True):
gitignore_file = Path(git_root) / ".gitignore"
if gitignore_file.exists():
content = io.read_text(gitignore_file)
if content is None:
try:
content = io.read_text(gitignore_file)
if content is None:
return
existing_lines = content.splitlines()
for pat in patterns:
if pat not in existing_lines:
patterns_to_add.append(pat)
except OSError as e:
io.tool_error(f"Error when trying to read {gitignore_file}: {e}")
return
existing_lines = content.splitlines()
for pat in patterns:
if pat not in existing_lines:
patterns_to_add.append(pat)
else:
content = ""
patterns_to_add = patterns
@@ -174,9 +181,17 @@ def check_gitignore(git_root, io, ask=True):
if content and not content.endswith("\n"):
content += "\n"
content += "\n".join(patterns_to_add) + "\n"
io.write_text(gitignore_file, content)
io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore")
try:
io.write_text(gitignore_file, content)
io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore")
except OSError as e:
io.tool_error(f"Error when trying to write to {gitignore_file}: {e}")
io.tool_output(
"Try running with appropriate permissions or manually add these patterns to .gitignore:"
)
for pattern in patterns_to_add:
io.tool_output(f" {pattern}")
def check_streamlit_install(io):
@@ -452,6 +467,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
litellm._lazy_module.client_session = httpx.Client(verify=False)
litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
if args.timeout:
litellm._load_litellm()
litellm._lazy_module.request_timeout = args.timeout
if args.dark_mode:
args.user_input_color = "#32FF32"
args.tool_error_color = "#FF3333"
@@ -532,9 +551,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.gui and not return_coder:
if not check_streamlit_install(io):
analytics.event("exit", reason="Streamlit not installed")
return
analytics.event("gui session")
launch_gui(argv)
analytics.event("exit", reason="GUI session ended")
return
if args.verbose:
@@ -561,6 +582,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io.tool_output(
"Provide either a single directory of a git repo, or a list of one or more files."
)
analytics.event("exit", reason="Invalid directory input")
return 1
git_dname = None
@@ -571,6 +593,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
fnames = []
else:
io.tool_error(f"{all_files[0]} is a directory, but --no-git selected.")
analytics.event("exit", reason="Directory with --no-git")
return 1
# We can't know the git repo for sure until after parsing the args.
@@ -579,18 +602,22 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.git and not force_git_root:
right_repo_root = guessed_wrong_repo(io, git_root, fnames, git_dname)
if right_repo_root:
analytics.event("exit", reason="Recursing with correct repo")
return main(argv, input, output, right_repo_root, return_coder=return_coder)
if args.just_check_update:
update_available = check_version(io, just_check=True, verbose=args.verbose)
analytics.event("exit", reason="Just checking update")
return 0 if not update_available else 1
if args.install_main_branch:
success = install_from_main_branch(io)
analytics.event("exit", reason="Installed main branch")
return 0 if success else 1
if args.upgrade:
success = install_upgrade(io)
analytics.event("exit", reason="Upgrade completed")
return 0 if success else 1
if args.check_update:
@@ -598,6 +625,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.list_models:
models.print_matching_models(io, args.list_models)
analytics.event("exit", reason="Listed models")
return 0
if args.git:
@@ -633,6 +661,19 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
register_models(git_root, args.model_settings_file, io, verbose=args.verbose)
register_litellm_models(git_root, args.model_metadata_file, io, verbose=args.verbose)
# Process any command line aliases
if args.alias:
for alias_def in args.alias:
# Split on first colon only
parts = alias_def.split(":", 1)
if len(parts) != 2:
io.tool_error(f"Invalid alias format: {alias_def}")
io.tool_output("Format should be: alias:model-name")
analytics.event("exit", reason="Invalid alias format error")
return 1
alias, model = parts
models.MODEL_ALIASES[alias.strip()] = model.strip()
if not args.model:
args.model = "gpt-4o-2024-08-06"
if os.environ.get("ANTHROPIC_API_KEY"):
@@ -646,11 +687,18 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
)
if args.verbose:
io.tool_output("Model info:")
io.tool_output("Model metadata:")
io.tool_output(json.dumps(main_model.info, indent=4))
io.tool_output("Model settings:")
for attr in sorted(fields(ModelSettings), key=lambda x: x.name):
val = getattr(main_model, attr.name)
val = json.dumps(val, indent=4)
io.tool_output(f"{attr.name}: {val}")
lint_cmds = parse_lint_cmds(args.lint_cmd, io)
if lint_cmds is None:
analytics.event("exit", reason="Invalid lint command format")
return 1
if args.show_model_warnings:
@@ -663,6 +711,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io.offer_url(urls.model_warnings, "Open documentation url for more info?")
io.tool_output()
except KeyboardInterrupt:
analytics.event("exit", reason="Keyboard interrupt during model warnings")
return 1
repo = None
@@ -686,8 +735,14 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if not args.skip_sanity_check_repo:
if not sanity_check_repo(repo, io):
analytics.event("exit", reason="Repository sanity check failed")
return 1
if repo:
analytics.event("repo", num_files=len(repo.get_tracked_files()))
else:
analytics.event("no-repo")
commands = Commands(
io,
None,
@@ -743,12 +798,20 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
num_cache_warming_pings=args.cache_keepalive_pings,
suggest_shell_commands=args.suggest_shell_commands,
chat_language=args.chat_language,
detect_urls=args.detect_urls,
)
except UnknownEditFormat as err:
io.tool_error(str(err))
io.offer_url(urls.edit_formats, "Open documentation about edit formats?")
analytics.event("exit", reason="Unknown edit format")
return 1
except ValueError as err:
io.tool_error(str(err))
analytics.event("exit", reason="ValueError during coder creation")
return 1
if return_coder:
analytics.event("exit", reason="Returning coder object")
return coder
coder.show_announcements()
@@ -759,6 +822,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
]
messages = coder.format_messages().all_messages()
utils.show_messages(messages)
analytics.event("exit", reason="Showed prompts")
return
if args.lint:
@@ -767,6 +831,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.test:
if not args.test_cmd:
io.tool_error("No --test-cmd provided.")
analytics.event("exit", reason="No test command provided")
return 1
test_errors = coder.commands.cmd_test(args.test_cmd)
if test_errors:
@@ -779,43 +844,41 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.commands.cmd_commit()
if args.lint or args.test or args.commit:
analytics.event("exit", reason="Completed lint/test/commit")
return
if args.show_repo_map:
repo_map = coder.get_repo_map()
if repo_map:
io.tool_output(repo_map)
analytics.event("exit", reason="Showed repo map")
return
if args.apply:
content = io.read_text(args.apply)
if content is None:
analytics.event("exit", reason="Failed to read apply content")
return
coder.partial_response_content = content
coder.apply_updates()
analytics.event("exit", reason="Applied updates")
return
if args.apply_clipboard_edits:
args.edit_format = main_model.editor_edit_format
args.message = "/paste"
if "VSCODE_GIT_IPC_HANDLE" in os.environ:
args.pretty = False
io.tool_output("VSCode terminal detected, pretty output has been disabled.")
io.tool_output('Use /help <question> for help, run "aider --help" to see cmd line args')
show = False
if args.show_release_notes is True:
show = True
elif args.show_release_notes is None and is_first_run:
io.tool_output()
show = io.confirm_ask("Would you like to see what's new in this version?")
if show:
io.tool_output(f"Opening release notes: {urls.release_notes}")
io.tool_output()
webbrowser.open(urls.release_notes)
elif args.show_release_notes is None and is_first_run:
io.tool_output()
io.offer_url(
urls.release_notes,
"Would you like to see what's new in this version?",
allow_never=False,
)
if git_root and Path.cwd().resolve() != Path(git_root).resolve():
io.tool_warning(
@@ -836,6 +899,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.run(with_message=args.message)
except SwitchCoder:
pass
analytics.event("exit", reason="Completed --message")
return
if args.message_file:
@@ -845,13 +909,18 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.run(with_message=message_from_file)
except FileNotFoundError:
io.tool_error(f"Message file not found: {args.message_file}")
analytics.event("exit", reason="Message file not found")
return 1
except IOError as e:
io.tool_error(f"Error reading message file: {e}")
analytics.event("exit", reason="Message file IO error")
return 1
analytics.event("exit", reason="Completed --message-file")
return
if args.exit:
analytics.event("exit", reason="Exit flag set")
return
analytics.event("cli session", main_model=main_model, edit_format=main_model.edit_format)
@@ -859,6 +928,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
while True:
try:
coder.run()
analytics.event("exit", reason="Completed main CLI coder.run")
return
except SwitchCoder as switch:
kwargs = dict(io=io, from_coder=coder)
@@ -877,6 +947,10 @@ def is_first_run_of_new_version(io, verbose=False):
installs_file = Path.home() / ".aider" / "installs.json"
key = (__version__, sys.executable)
# Never show notes for .dev versions
if ".dev" in __version__:
return False
if verbose:
io.tool_output(
f"Checking imports for version {__version__} and executable {sys.executable}"

View File

@@ -17,7 +17,7 @@ from aider.dump import dump # noqa: F401
from aider.llm import litellm
DEFAULT_MODEL_NAME = "gpt-4o"
ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31"
ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25"
OPENAI_MODELS = """
gpt-4
@@ -61,6 +61,23 @@ claude-3-5-sonnet-20241022
ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.strip()]
# Mapping of model aliases to their canonical names
MODEL_ALIASES = {
# Claude models
"sonnet": "claude-3-5-sonnet-20241022",
"haiku": "claude-3-5-haiku-20241022",
"opus": "claude-3-opus-20240229",
# GPT models
"4": "gpt-4-0613",
"4o": "gpt-4o-2024-08-06",
"4-turbo": "gpt-4-1106-preview",
"35turbo": "gpt-3.5-turbo",
"35-turbo": "gpt-3.5-turbo",
"3": "gpt-3.5-turbo",
# Other models
"deepseek": "deepseek/deepseek-coder",
}
@dataclass
class ModelSettings:
@@ -711,6 +728,7 @@ MODEL_SETTINGS = [
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-preview",
@@ -722,6 +740,7 @@ MODEL_SETTINGS = [
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/qwen/qwen-2.5-coder-32b-instruct",
@@ -817,7 +836,11 @@ model_info_manager = ModelInfoManager()
class Model(ModelSettings):
def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):
# Map any alias to its canonical name
model = MODEL_ALIASES.get(model, model)
self.name = model
self.max_chat_history_tokens = 1024
self.weak_model = None
self.editor_model = None
@@ -933,10 +956,11 @@ class Model(ModelSettings):
and ("2.5" in model or "2-5" in model)
and "32b" in model
):
"openrouter/qwen/qwen-2.5-coder-32b-instruct",
self.edit_format = "diff"
self.editor_edit_format = "editor-diff"
self.use_repo_map = True
if model.startswith("ollama/") or model.startswith("ollama_chat/"):
self.extra_params = dict(num_ctx=8 * 1024)
return # <--
# use the defaults
@@ -1099,6 +1123,9 @@ def register_models(model_settings_fnames):
if not os.path.exists(model_settings_fname):
continue
if not Path(model_settings_fname).read_text().strip():
continue
try:
with open(model_settings_fname, "r") as model_settings_file:
model_settings_list = yaml.safe_load(model_settings_file)

View File

@@ -0,0 +1,91 @@
(class_definition
name: (identifier) @name.definition.class) @definition.class
(method_signature
(function_signature)) @definition.method
(type_alias
(type_identifier) @name.definition.type) @definition.type
(method_signature
(getter_signature
name: (identifier) @name.definition.method)) @definition.method
(method_signature
(setter_signature
name: (identifier) @name.definition.method)) @definition.method
(method_signature
(function_signature
name: (identifier) @name.definition.method)) @definition.method
(method_signature
(factory_constructor_signature
(identifier) @name.definition.method)) @definition.method
(method_signature
(constructor_signature
name: (identifier) @name.definition.method)) @definition.method
(method_signature
(operator_signature)) @definition.method
(method_signature) @definition.method
(mixin_declaration
(mixin)
(identifier) @name.definition.mixin) @definition.mixin
(extension_declaration
name: (identifier) @name.definition.extension) @definition.extension
(enum_declaration
name: (identifier) @name.definition.enum) @definition.enum
(function_signature
name: (identifier) @name.definition.function) @definition.function
(new_expression
(type_identifier) @name.reference.class) @reference.class
(initialized_variable_definition
name: (identifier)
value: (identifier) @name.reference.class
value: (selector
"!"?
(argument_part
(arguments
(argument)*))?)?) @reference.class
(assignment_expression
left: (assignable_expression
(identifier)
(unconditional_assignable_selector
"."
(identifier) @name.reference.call))) @reference.call
(assignment_expression
left: (assignable_expression
(identifier)
(conditional_assignable_selector
"?."
(identifier) @name.reference.call))) @reference.call
((identifier) @name
(selector
"!"?
(conditional_assignable_selector
"?." (identifier) @name.reference.call)?
(unconditional_assignable_selector
"."? (identifier) @name.reference.call)?
(argument_part
(arguments
(argument)*))?)*
(cascade_section
(cascade_selector
(identifier)) @name.reference.call
(argument_part
(arguments
(argument)*))?)?) @reference.call

View File

@@ -8,12 +8,12 @@ import pexpect
import psutil
def run_cmd(command, verbose=False, error_print=None):
def run_cmd(command, verbose=False, error_print=None, cwd=None):
try:
if sys.stdin.isatty() and hasattr(pexpect, "spawn") and platform.system() != "Windows":
return run_cmd_pexpect(command, verbose)
return run_cmd_pexpect(command, verbose, cwd)
return run_cmd_subprocess(command, verbose)
return run_cmd_subprocess(command, verbose, cwd)
except OSError as e:
error_message = f"Error occurred while running command '{command}': {str(e)}"
if error_print is None:
@@ -39,7 +39,7 @@ def get_windows_parent_process_name():
return None
def run_cmd_subprocess(command, verbose=False):
def run_cmd_subprocess(command, verbose=False, cwd=None):
if verbose:
print("Using run_cmd_subprocess:", command)
@@ -69,6 +69,7 @@ def run_cmd_subprocess(command, verbose=False):
errors="replace",
bufsize=0, # Set bufsize to 0 for unbuffered output
universal_newlines=True,
cwd=cwd,
)
output = []
@@ -85,7 +86,7 @@ def run_cmd_subprocess(command, verbose=False):
return 1, str(e)
def run_cmd_pexpect(command, verbose=False):
def run_cmd_pexpect(command, verbose=False, cwd=None):
"""
Run a shell command interactively using pexpect, capturing all output.
@@ -112,12 +113,12 @@ def run_cmd_pexpect(command, verbose=False):
# Use the shell from SHELL environment variable
if verbose:
print("Running pexpect.spawn with shell:", shell)
child = pexpect.spawn(shell, args=["-c", command], encoding="utf-8")
child = pexpect.spawn(shell, args=["-c", command], encoding="utf-8", cwd=cwd)
else:
# Fall back to spawning the command directly
if verbose:
print("Running pexpect.spawn without shell.")
child = pexpect.spawn(command, encoding="utf-8")
child = pexpect.spawn(command, encoding="utf-8", cwd=cwd)
# Transfer control to the user, capturing output
child.interact(output_filter=output_callback)

View File

@@ -13,3 +13,4 @@ git_index_version = "https://github.com/Aider-AI/aider/issues/211"
install_properly = "https://aider.chat/docs/troubleshooting/imports.html"
analytics = "https://aider.chat/docs/more/analytics.html"
release_notes = "https://aider.chat/HISTORY.html#release-notes"
edit_formats = "https://aider.chat/docs/more/edit-formats.html"

View File

@@ -13,7 +13,7 @@ import git
from aider.dump import dump # noqa: F401
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp"}
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}
class IgnorantTemporaryDirectory:

View File

@@ -34,7 +34,7 @@ class Voice:
threshold = 0.15
def __init__(self, audio_format="wav"):
def __init__(self, audio_format="wav", device_name=None):
if sf is None:
raise SoundDeviceError
try:
@@ -42,6 +42,27 @@ class Voice:
import sounddevice as sd
self.sd = sd
devices = sd.query_devices()
if device_name:
# Find the device with matching name
device_id = None
for i, device in enumerate(devices):
if device_name in device["name"]:
device_id = i
break
if device_id is None:
available_inputs = [d["name"] for d in devices if d["max_input_channels"] > 0]
raise ValueError(f"Device '{device_name}' not found. Available input devices: {available_inputs}")
print(f"Using input device: {device_name} (ID: {device_id})")
self.device_id = device_id
else:
self.device_id = None
except (OSError, ModuleNotFoundError):
raise SoundDeviceError
if audio_format not in ["wav", "mp3", "webm"]:
@@ -93,7 +114,7 @@ class Voice:
temp_wav = tempfile.mktemp(suffix=".wav")
try:
sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"])
sample_rate = int(self.sd.query_devices(self.device_id, "input")["default_samplerate"])
except (TypeError, ValueError):
sample_rate = 16000 # fallback to 16kHz if unable to query device
except self.sd.PortAudioError:
@@ -104,7 +125,7 @@ class Voice:
self.start_time = time.time()
try:
with self.sd.InputStream(samplerate=sample_rate, channels=1, callback=self.callback):
with self.sd.InputStream(samplerate=sample_rate, channels=1, callback=self.callback, device=self.device_id):
prompt(self.get_prompt, refresh_interval=0.1)
except self.sd.PortAudioError as err:
raise SoundDeviceError(f"Error accessing audio input device: {err}")

View File

@@ -24,9 +24,47 @@ cog.out(text)
]]]-->
### main branch
- PDF support for Sonnet and Gemini models.
- Added `--voice-input-device` to select audio input device for voice recording, by @preynal.
- Added `--timeout` option to configure API call timeouts.
- Set cwd to repo root when running shell commands.
- Added Ctrl-Up/Down keyboard shortcuts for per-message history navigation.
- Improved error handling for failed .gitignore file operations.
- Improved error handling for input history file permissions.
- Improved error handling for analytics file access.
- Removed spurious warning about disabling pretty in VSCode.
- Removed broken support for Dart.
- Bugfix when scraping URLs found in chat messages.
- Better handling of __version__ import errors.
- Improved `/drop` command to support substring matching for non-glob patterns.
- Aider wrote 82% of the code in this release.
### Aider v0.65.1
- Bugfix to `--alias`.
### Aider v0.65.0
- Added `--alias` config to define [custom model aliases](https://aider.chat/docs/config/model-aliases.html).
- Added `--[no-]detect-urls` flag to disable detecting and offering to scrape URLs found in the chat.
- Ollama models now default to an 8k context window.
- Added [RepoMap support for Dart language](https://aider.chat/docs/languages.html) by @malkoG.
- Ask 2.5% of users if they want to opt-in to [analytics](https://aider.chat/docs/more/analytics.html).
- Skip suggesting files that share names with files already in chat.
- `/editor` returns and prefill the file content into the prompt, so you can use `/editor` to compose messages that start with `/commands`, etc.
- Enhanced error handling for analytics.
- Improved handling of UnknownEditFormat exceptions with helpful documentation links.
- Bumped dependencies to pick up grep-ast 0.4.0 for Dart language support.
- Aider wrote 81% of the code in this release.
### Aider v0.64.1
- Disable streaming for o1 on OpenRouter.
### Aider v0.64.0
- Added [`/editor` command](https://aider.chat/docs/usage/commands.html) to open system editor for writing prompts, by @thehunmonkgroup.
- Full support for `gpt-4o-2024-11-20`.
- Stream o1 models by default.

View File

@@ -2143,8 +2143,8 @@
Paul Gauthier (aider): 201
start_tag: v0.49.0
total_lines: 324
- aider_percentage: 56.18
aider_total: 450
- aider_percentage: 52.49
aider_total: 580
end_date: '2024-08-20'
end_tag: v0.51.0
file_counts:
@@ -2178,6 +2178,12 @@
Paul Gauthier: 3
aider/utils.py:
Paul Gauthier (aider): 6
aider/website/_includes/code-in-json-benchmark.js:
Paul Gauthier: 101
Paul Gauthier (aider): 64
aider/website/_includes/code-in-json-syntax.js:
Paul Gauthier: 73
Paul Gauthier (aider): 66
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
benchmark/benchmark.py:
@@ -2198,10 +2204,10 @@
Paul Gauthier: 15
Paul Gauthier (aider): 104
grand_total:
Paul Gauthier: 351
Paul Gauthier (aider): 450
Paul Gauthier: 525
Paul Gauthier (aider): 580
start_tag: v0.50.0
total_lines: 801
total_lines: 1105
- aider_percentage: 68.1
aider_total: 521
end_date: '2024-08-23'
@@ -2267,8 +2273,8 @@
pcamp: 1
start_tag: v0.51.0
total_lines: 765
- aider_percentage: 58.61
aider_total: 405
- aider_percentage: 61.4
aider_total: 455
end_date: '2024-08-27'
end_tag: v0.53.0
file_counts:
@@ -2333,13 +2339,15 @@
tests/basic/test_repomap.py:
Paul Gauthier: 4
Paul Gauthier (aider): 63
tests/fixtures/sample-code-base/sample.js:
Paul Gauthier (aider): 50
tests/fixtures/sample-code-base/sample.py:
Paul Gauthier (aider): 68
grand_total:
Paul Gauthier: 286
Paul Gauthier (aider): 405
Paul Gauthier (aider): 455
start_tag: v0.52.0
total_lines: 691
total_lines: 741
- aider_percentage: 63.75
aider_total: 204
end_date: '2024-08-28'
@@ -3013,3 +3021,151 @@
Paul Gauthier (aider): 385
start_tag: v0.62.0
total_lines: 699
- aider_percentage: 73.15
aider_total: 880
end_date: '2024-11-21'
end_tag: v0.64.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/analytics.py:
Paul Gauthier: 20
Paul Gauthier (aider): 21
aider/args.py:
Paul Gauthier: 2
Paul Gauthier (aider): 10
aider/coders/base_coder.py:
Paul Gauthier: 15
Paul Gauthier (aider): 3
caetanominuzzo: 1
aider/commands.py:
Chad Phillips: 4
Paul Gauthier: 5
Paul Gauthier (aider): 19
aider/editor.py:
Chad Phillips: 133
Paul Gauthier (aider): 13
aider/exceptions.py:
Paul Gauthier: 5
aider/help_pats.py:
Paul Gauthier: 1
aider/io.py:
Chad Phillips: 9
Paul Gauthier (aider): 41
mw: 21
aider/main.py:
Paul Gauthier: 21
Paul Gauthier (aider): 41
aider/models.py:
Paul Gauthier: 41
Paul Gauthier (aider): 33
aider/repo.py:
Paul Gauthier (aider): 5
aider/urls.py:
Paul Gauthier: 1
aider/website/_includes/edit-leaderboard.js:
Paul Gauthier (aider): 97
aider/website/_includes/quant-chart.js:
Paul Gauthier: 3
Paul Gauthier (aider): 66
aider/website/_includes/refactor-leaderboard.js:
Paul Gauthier (aider): 90
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
Paul Gauthier (aider): 10
aider/website/share/index.md:
Paul Gauthier (aider): 29
benchmark/over_time.py:
Paul Gauthier: 11
Paul Gauthier (aider): 162
scripts/blame.py:
Paul Gauthier: 1
Paul Gauthier (aider): 2
scripts/issues.py:
Paul Gauthier: 5
Paul Gauthier (aider): 12
scripts/versionbump.py:
Paul Gauthier: 7
tests/basic/test_analytics.py:
Paul Gauthier: 12
Paul Gauthier (aider): 30
tests/basic/test_commands.py:
Paul Gauthier (aider): 4
tests/basic/test_editor.py:
Paul Gauthier (aider): 129
tests/basic/test_main.py:
Paul Gauthier (aider): 8
tests/basic/test_models.py:
Paul Gauthier: 3
Paul Gauthier (aider): 55
grand_total:
Chad Phillips: 146
Paul Gauthier: 155
Paul Gauthier (aider): 880
caetanominuzzo: 1
mw: 21
start_tag: v0.63.0
total_lines: 1203
- aider_percentage: 81.11
aider_total: 584
end_date: '2024-11-26'
end_tag: v0.65.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/analytics.py:
Paul Gauthier: 2
Paul Gauthier (aider): 5
aider/args.py:
Paul Gauthier (aider): 12
aider/coders/base_coder.py:
Paul Gauthier: 1
Paul Gauthier (aider): 31
aider/commands.py:
Paul Gauthier: 2
aider/io.py:
Paul Gauthier: 3
Paul Gauthier (aider): 9
aider/main.py:
Paul Gauthier: 15
Paul Gauthier (aider): 19
aider/models.py:
Paul Gauthier: 9
Paul Gauthier (aider): 17
aider/queries/tree-sitter-dart-tags.scm:
malkoG: 91
aider/urls.py:
Paul Gauthier (aider): 1
aider/website/_includes/quant-chart.js:
Paul Gauthier (aider): 76
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
benchmark/benchmark.py:
Paul Gauthier (aider): 10
benchmark/docker.sh:
Paul Gauthier (aider): 1
benchmark/over_time.py:
Paul Gauthier: 1
Paul Gauthier (aider): 157
scripts/update-docs.sh:
Paul Gauthier: 1
scripts/update-history.py:
Paul Gauthier: 8
Paul Gauthier (aider): 64
tests/basic/test_coder.py:
Paul Gauthier (aider): 81
tests/basic/test_editor.py:
Paul Gauthier (aider): 16
tests/basic/test_main.py:
Paul Gauthier: 1
Paul Gauthier (aider): 42
tests/basic/test_models.py:
Paul Gauthier (aider): 30
tests/basic/test_repomap.py:
Paul Gauthier (aider): 13
grand_total:
Paul Gauthier: 45
Paul Gauthier (aider): 584
malkoG: 91
start_tag: v0.64.0
total_lines: 720

View File

@@ -46,7 +46,8 @@
- dirname: 2024-05-03-20-47-24--gemini-1.5-pro-diff-fenced
test_cases: 133
model: gemini-1.5-pro-latest
released: 2024-05-03
model: gemini-1.5-pro-001
edit_format: diff-fenced
commit_hash: 3a48dfb, 5d32dd7
pass_rate_1: 45.9
@@ -410,6 +411,7 @@
- dirname: 2024-06-08-22-37-55--qwen2-72b-instruct-whole
test_cases: 133
model: Qwen2 72B Instruct
released: 2024-06-08
edit_format: whole
commit_hash: 02c7335-dirty, 1a97498-dirty
pass_rate_1: 44.4
@@ -835,6 +837,7 @@
- dirname: 2024-08-28-07-10-50--gemini-1.5-pro-exp-0827-diff-fenced
test_cases: 133
model: gemini-1.5-pro-exp-0827
released: 2024-08-27
edit_format: diff-fenced
commit_hash: d8adc75
pass_rate_1: 54.9
@@ -1206,6 +1209,7 @@
- dirname: 2024-09-24-16-26-45--gemini-1.5-pro-002-diff-fenced
test_cases: 133
model: gemini-1.5-pro-002
released: 2024-09-24
edit_format: diff-fenced
commit_hash: 6b5fe9b, 3edcd71
pass_rate_1: 49.6
@@ -1796,7 +1800,7 @@
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openai/Qwen2.5-Coder-32B-Instruct
command: aider --model openai/hf:Qwen/Qwen2.5-Coder-32B-Instruct --openai-api-base https://glhf.chat/api/openai/v1
date: 2024-11-09
versions: 0.59.2.dev
seconds_per_case: 22.5
@@ -1894,4 +1898,125 @@
date: 2024-11-20
versions: 0.63.3.dev
seconds_per_case: 40.7
total_cost: 0.1497
total_cost: 0.1497
- dirname: 2024-11-21-17-46-36--gemini-exp-1121-diff
test_cases: 133
model: gemini-exp-1121
released: 2024-11-21
edit_format: diff
commit_hash: e94961a
pass_rate_1: 46.6
pass_rate_2: 57.9
percent_cases_well_formed: 83.5
error_outputs: 101
num_malformed_responses: 101
num_with_malformed_responses: 22
user_asks: 5
lazy_comments: 0
syntax_errors: 0
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model gemini/gemini-exp-1121
date: 2024-11-21
versions: 0.63.3.dev
seconds_per_case: 60.3
total_cost: 0.0000
- dirname: 2024-11-15-20-33-31--gemini-exp-1114-diff
test_cases: 133
model: gemini-exp-1114
released: 2024-11-14
edit_format: diff
commit_hash: 0bf17a4
pass_rate_1: 50.4
pass_rate_2: 60.9
percent_cases_well_formed: 85.7
error_outputs: 70
num_malformed_responses: 70
num_with_malformed_responses: 19
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model gemini/gemini-exp-1114
date: 2024-11-15
versions: 0.63.2.dev
seconds_per_case: 38.6
- dirname: 2024-11-27-07-41-51--qwen2.5-coder-14b-whole-1
test_cases: 133
model: ollama/qwen2.5-coder:14b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 53.4
pass_rate_2: 61.7
percent_cases_well_formed: 98.5
error_outputs: 4
num_malformed_responses: 4
num_with_malformed_responses: 2
user_asks: 48
lazy_comments: 0
syntax_errors: 2
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model ollama/qwen2.5-coder:14b
date: 2024-11-27
versions: 0.65.2.dev
seconds_per_case: 58.0
total_cost: 0.0000
- dirname: 2024-11-28-07-42-56--qwen2.5-coder-32b-whole-4
test_cases: 133
model: ollama/qwen2.5-coder:32b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 58.6
pass_rate_2: 72.9
percent_cases_well_formed: 100.0
num_malformed_responses: 0
num_with_malformed_responses: 0
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
command: aider --model ollama/qwen2.5-coder:32b
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 147.5
total_cost: 0.0000
- dirname: 2024-11-28-13-14-00--tulu3-whole-2
test_cases: 133
model: ollama/tulu3
edit_format: whole
commit_hash: 200295e
pass_rate_1: 21.8
pass_rate_2: 26.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
exhausted_context_windows: 0
command: aider --model ollama/tulu3
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 35.8
total_cost: 0.0000
- dirname: 2024-11-28-14-41-46--granite3-dense-8b-whole-1
test_cases: 133
model: ollama/granite3-dense:8b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 17.3
pass_rate_2: 20.3
percent_cases_well_formed: 78.9
exhausted_context_windows: 0
command: aider --model ollama/granite3-dense:8b
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 38.1
total_cost: 0.0000

View File

@@ -1,6 +1,6 @@
- dirname: 2024-11-09-11-09-15--Qwen2.5-Coder-32B-Instruct
test_cases: 133
model: HuggingFace weights via glhf.chat
model: "HuggingFace via GLHF: BF16"
released: 2024-11-12
edit_format: diff
commit_hash: ec9982a
@@ -16,78 +16,307 @@
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openai/Qwen2.5-Coder-32B-Instruct
command: aider --model openai/hf:Qwen/Qwen2.5-Coder-32B-Instruct --openai-api-base https://glhf.chat/api/openai/v1
date: 2024-11-09
versions: 0.59.2.dev
seconds_per_case: 22.5
total_cost: 0.0000
- dirname: 2024-11-20-15-17-37--qwen25-32b-or-diff
test_cases: 133
model: openrouter/qwen/qwen-2.5-coder-32b-instruct
- dirname: 2024-11-22-18-56-13--ollama-qwen2.5-coder:32b-instruct-fp16
test_cases: 132
model: "Ollama: fp16"
edit_format: diff
commit_hash: e917424
pass_rate_1: 49.6
pass_rate_2: 65.4
percent_cases_well_formed: 84.2
error_outputs: 43
num_malformed_responses: 31
num_with_malformed_responses: 21
user_asks: 43
lazy_comments: 0
syntax_errors: 2
indentation_errors: 2
exhausted_context_windows: 12
test_timeouts: 2
command: aider --model openrouter/qwen/qwen-2.5-coder-32b-instruct
date: 2024-11-20
versions: 0.63.3.dev
seconds_per_case: 40.7
total_cost: 0.1497
- dirname: 2024-09-20-21-47-17--qwen2.5-32b-instruct-q8_0-whole
test_cases: 133
model: ollama/qwen2.5:32b-instruct-q8_0
edit_format: whole
commit_hash: 2753ac6
pass_rate_1: 46.6
pass_rate_2: 58.6
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 1
commit_hash: f06452c-dirty, 6a0a97c-dirty, 4e9ae16-dirty, 5506d0f-dirty
pass_rate_1: 58.3
pass_rate_2: 71.4
percent_cases_well_formed: 90.2
error_outputs: 27
num_malformed_responses: 26
num_with_malformed_responses: 13
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model ollama/qwen2.5-coder:32b-instruct-fp16
date: 2024-11-22
versions: 0.64.2.dev
seconds_per_case: 119.6
total_cost: 0.0000
- dirname: 2024-11-22-14-53-26--hyperbolic-qwen25coder32binstruct
test_cases: 133
model: "Hyperbolic: BF16"
edit_format: diff
commit_hash: f9ef161, 17aef7b-dirty
pass_rate_1: 57.9
pass_rate_2: 69.2
percent_cases_well_formed: 91.7
error_outputs: 30
num_malformed_responses: 29
num_with_malformed_responses: 11
user_asks: 9
lazy_comments: 0
syntax_errors: 4
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model ollama/qwen2.5:32b-instruct-q8_0
date: 2024-09-20
versions: 0.56.1.dev
seconds_per_case: 1763.7
command: aider --model openai/Qwen/Qwen2.5-Coder-32B-Instruct --openai-api-base https://api.hyperbolic.xyz/v1/
date: 2024-11-22
versions: 0.64.2.dev
seconds_per_case: 33.2
total_cost: 0.0000
- dirname: 2024-11-22-17-53-35--qwen25-coder-32b-Instruct-4bit
test_cases: 133
model: "mlx-community: 4bit"
edit_format: diff
commit_hash: a16dcab-dirty
pass_rate_1: 60.2
pass_rate_2: 72.2
percent_cases_well_formed: 88.7
error_outputs: 31
num_malformed_responses: 30
num_with_malformed_responses: 15
user_asks: 6
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 0
command: aider --model openai/mlx-community/Qwen2.5-Coder-32B-Instruct-4bit
date: 2024-11-23
versions: 0.64.2.dev
seconds_per_case: 53.4
total_cost: 0.0000
- dirname: 2024-09-30-14-09-43--qwen2.5-32b-whole-2
- dirname: 2024-11-23-15-07-20--qwen25-coder-32b-Instruct-8bit
test_cases: 133
model: ollama/qwen2.5:32b
edit_format: whole
commit_hash: 765c4cb
pass_rate_1: 44.4
pass_rate_2: 54.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 9
model: "mlx-community: 8bit"
edit_format: diff
commit_hash: a16dcab-dirty
pass_rate_1: 59.4
pass_rate_2: 72.2
percent_cases_well_formed: 92.5
error_outputs: 20
num_malformed_responses: 15
num_with_malformed_responses: 10
user_asks: 7
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 5
test_timeouts: 2
command: aider --model openai/mlx-community/Qwen2.5-Coder-32B-Instruct-8bit
date: 2024-11-23
versions: 0.64.2.dev
seconds_per_case: 98.4
total_cost: 0.0000
- dirname: 2024-11-24-22-18-18--or-all-or-fixed-blank-messages2
test_cases: 133
model: "OpenRouter: multiple"
edit_format: diff
commit_hash: 0c59d32
pass_rate_1: 57.1
pass_rate_2: 67.7
percent_cases_well_formed: 95.5
error_outputs: 56
num_malformed_responses: 10
num_with_malformed_responses: 6
user_asks: 14
lazy_comments: 0
syntax_errors: 6
indentation_errors: 0
exhausted_context_windows: 3
test_timeouts: 1
command: aider --model openrouter/qwen/qwen-2.5-coder-32b-instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 21.2
total_cost: 0.1420
- dirname: 2024-11-23-21-08-53--ollama-qwen2.5-coder:32b-instruct-q4_K_M-8kctx
test_cases: 133
model: "Ollama: q4_K_M"
edit_format: diff
commit_hash: baa1335-dirty, e63df83-dirty, ff8c1aa-dirty
pass_rate_1: 54.9
pass_rate_2: 66.9
percent_cases_well_formed: 94.0
error_outputs: 21
num_malformed_responses: 21
num_with_malformed_responses: 8
user_asks: 5
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model ollama/qwen2.5:32b
date: 2024-09-30
versions: 0.58.1.dev
seconds_per_case: 134.9
command: aider --model ollama/qwen2.5-coder:32b-instruct-q4_K_M
date: 2024-11-23
versions: 0.64.2.dev
seconds_per_case: 35.7
total_cost: 0.0000
- dirname: 2024-11-24-02-23-32--deepinfra-qwen-diff
test_cases: 133
model: "Deepinfra: BF16"
edit_format: diff
commit_hash: bb78e2f
pass_rate_1: 58.6
pass_rate_2: 72.2
percent_cases_well_formed: 94.7
error_outputs: 15
num_malformed_responses: 13
num_with_malformed_responses: 7
user_asks: 3
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 3
command: aider --model deepinfra/Qwen/Qwen2.5-Coder-32B-Instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 17.5
total_cost: 0.0000
- dirname: 2024-11-24-04-12-58--fireworks-qwen-diff
test_cases: 133
model: "Fireworks: unknown"
edit_format: diff
commit_hash: 757eac0
pass_rate_1: 57.9
pass_rate_2: 72.2
percent_cases_well_formed: 94.0
error_outputs: 23
num_malformed_responses: 19
num_with_malformed_responses: 8
user_asks: 8
lazy_comments: 0
syntax_errors: 6
indentation_errors: 0
exhausted_context_windows: 4
test_timeouts: 1
command: aider --model fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 10.4
total_cost: 0.5759
- dirname: 2024-11-24-02-04-59--ollama-qwen2.5-coder:32b-instruct-q2_K-8kctx
test_cases: 133
model: "Ollama: q2_K"
edit_format: diff
commit_hash: 757eac0, bb78e2f, 8d0ba40-dirty, 1d09e96
pass_rate_1: 48.9
pass_rate_2: 61.7
percent_cases_well_formed: 91.7
error_outputs: 32
num_malformed_responses: 32
num_with_malformed_responses: 11
user_asks: 8
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model ollama/qwen2.5-coder:32b-instruct-q2_K
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 97.8
total_cost: 0.0000
- dirname: 2024-11-24-14-56-49--qwen25-32b-or-fireworks
test_cases: 133
model: "Fireworks via OpenRouter: unknown"
edit_format: diff
commit_hash: c2f184f
pass_rate_1: 55.6
pass_rate_2: 67.7
percent_cases_well_formed: 94.0
error_outputs: 39
num_malformed_responses: 24
num_with_malformed_responses: 8
user_asks: 13
lazy_comments: 0
syntax_errors: 1
indentation_errors: 1
exhausted_context_windows: 7
test_timeouts: 4
command: aider --model openrouter/qwen/qwen-2.5-coder-32b-instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 16.1
total_cost: 0.1391
- dirname: 2024-11-24-22-03-19--or-hyperbolic-or-fixed-blank-messages2
test_cases: 133
model: "Hyperbolic via OpenRouter: BF16"
edit_format: diff
commit_hash: 0c59d32
pass_rate_1: 55.6
pass_rate_2: 68.4
percent_cases_well_formed: 89.5
error_outputs: 28
num_malformed_responses: 24
num_with_malformed_responses: 14
user_asks: 29
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 4
test_timeouts: 1
command: aider --model openrouter/qwen/qwen-2.5-coder-32b-instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 41.5
total_cost: 0.1402
- dirname: 2024-11-24-15-00-50--qwen25-32b-or-deepinfra
test_cases: 133
model: "Deepinfra via OpenRouter: BF16"
edit_format: diff
commit_hash: c2f184f
pass_rate_1: 57.1
pass_rate_2: 69.9
percent_cases_well_formed: 89.5
error_outputs: 35
num_malformed_responses: 31
num_with_malformed_responses: 14
user_asks: 11
lazy_comments: 0
syntax_errors: 1
indentation_errors: 1
exhausted_context_windows: 4
test_timeouts: 1
command: aider --model openrouter/qwen/qwen-2.5-coder-32b-instruct
date: 2024-11-24
versions: 0.64.2.dev
seconds_per_case: 28.5
total_cost: 0.1390
- dirname: 2024-11-26-03-15-06--ollama-qwen2.5-coder:32b-instruct-fp16-2kctx
test_cases: 132
model: "Ollama: fp16, 2k ctx"
edit_format: diff
commit_hash: 68be6c5-dirty, 554d274, 2ff3a23, 2ff3a23-dirty, 61759f9, dd48b74, 3ebd47d-dirty
pass_rate_1: 43.2
pass_rate_2: 51.9
percent_cases_well_formed: 46.2
error_outputs: 171
num_malformed_responses: 165
num_with_malformed_responses: 71
user_asks: 97
lazy_comments: 2
syntax_errors: 4
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: "aider --model ollama/qwen2.5-coder:32b-instruct-fp16 # num_ctx: 2048"
date: 2024-11-26
versions: 0.64.2.dev,0.65.1.dev
seconds_per_case: 188.6
total_cost: 0.0000

View File

@@ -13,3 +13,7 @@ def hello():
print("Hello}") # Note: contains a brace
python}
```
{: .note }
People often ask for SHIFT-ENTER to be a soft-newline.
Unfortunately there is no portable way to detect that keystroke in terminals.

View File

@@ -1,16 +1,5 @@
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('quantChart').getContext('2d');
var chartData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in site.data.quant %}
allData.push({
@@ -19,51 +8,88 @@ document.addEventListener('DOMContentLoaded', function () {
});
{% endfor %}
allData.forEach(function(row) {
chartData.labels.push(row.model);
chartData.datasets[0].data.push(row.pass_rate_2);
});
// Sort data by pass_rate_2 in descending order
allData.sort((a, b) => b.pass_rate_2 - a.pass_rate_2);
new Chart(ctx, {
type: 'bar',
data: chartData,
options: {
plugins: {
legend: {
display: false
},
title: {
display: true,
text: 'Aider coder editing benchmark',
font: {
size: 16
}
}
},
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Percent completed correctly',
font: {
size: 14
var chart;
function updateChart(filterText) {
var filteredData = allData.filter(row =>
row.model.toLowerCase().includes(filterText.toLowerCase())
);
var chartData = {
labels: filteredData.map(row => row.model),
datasets: [{
label: 'Percent completed correctly',
data: filteredData.map(row => row.pass_rate_2),
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
if (chart) {
chart.data = chartData;
chart.update();
} else {
chart = new Chart(ctx, {
type: 'bar',
data: chartData,
options: {
plugins: {
legend: {
display: false
},
title: {
display: true,
text: 'Aider code editing benchmark',
font: {
size: 16
}
}
},
ticks: {
font: {
size: 16
}
}
},
x: {
ticks: {
font: {
size: 16
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Percent completed correctly',
font: {
size: 14
}
},
ticks: {
font: {
size: 16
}
}
},
x: {
ticks: {
font: {
size: 16
}
},
title: {
display: true,
text: 'Provider: quantization',
font: {
size: 14
}
}
}
}
}
}
});
}
}
// Initial chart render
updateChart('');
// Connect search input to chart filtering
document.getElementById('quantSearchInput').addEventListener('keyup', function() {
updateChart(this.value);
});
});

View File

@@ -1,6 +1,6 @@
---
title: Quantization matters
excerpt: Open source LLMs are becoming very powerful, but pay attention to how you (or your provider) is quantizing the model. It can strongly affect code editing skill.
title: Details matter with open source models
excerpt: Open source LLMs are becoming very powerful, but pay attention to how you (or your provider) are serving the model. It can affect code editing skill.
highlight_image: /assets/quantization.jpg
draft: false
nav_exclude: true
@@ -9,35 +9,186 @@ nav_exclude: true
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Quantization matters
# Details matter with open source models
{: .no_toc }
Open source models like Qwen 2.5 32B are performing very well on
<canvas id="quantChart" width="800" height="600" style="margin: 20px 0"></canvas>
Open source models like Qwen 2.5 32B Instruct are performing very well on
aider's code editing benchmark, rivaling closed source frontier models.
But pay attention to how your model is being quantized, as it
can strongly impact code editing skill.
Heavily quantized models are often used by cloud API providers
and local model servers like Ollama.
<canvas id="quantChart" width="800" height="450" style="margin: 20px 0"></canvas>
But pay attention to how your model is being served and quantized,
as it can impact code editing skill.
Open source models are often available at a variety of quantizations,
and can be served with different token limits.
These details matter when working with code.
The graph above and table below compares different versions of the Qwen 2.5 Coder 32B Instruct model,
served both locally and from a variety of cloud providers.
- The [HuggingFace BF16 weights](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) served via [glhf.chat](https://glhf.chat).
- [4bit and 8bit quants for mlx](https://t.co/cwX3DYX35D).
- The results from [OpenRouter's mix of providers](https://openrouter.ai/qwen/qwen-2.5-coder-32b-instruct/providers) which serve the model with different levels of quantization.
- Results from OpenRouter's providers, both served via OpenRouter and directly to their own APIs.
- Ollama locally serving different quantizations from the [Ollama model library](https://ollama.com/library/qwen2.5-coder:32b-instruct-q4_K_M) with 8k+
context windows.
- An Ollama fp16 quantization served with Ollama's default 2k context window.
### Pitfalls and details
This benchmarking effort highlighted a number of pitfalls and details specific to open source
models which
can have a significant impact on their ability to correctly edit code:
- **Quantization** -- Open source models are often available at dozens of different quantizations.
Most seem to only modestly decrease code editing skill, but stronger quantizations
do have a real impact.
- **Context window** -- Cloud providers can decide how large a context window to accept,
and they often choose differently. Ollama's local API server
defaults to a tiny 2k context window,
and silently discards data that exceeds it. Such a small window has
catastrophic effects on performance, without throwing obvious hard errors.
- **Output token limits** -- Open source models are often served with wildly
differing output token limits. This has a direct impact on how much code the
model can write or edit in a response.
- **Buggy cloud providers** -- While benchmarking Qwen 2.5 Coder 32B Instruct
and DeepSeek V2.5, I discovered
multiple cloud providers with broken or buggy API endpoints.
They seemed
to be returning results different from expected based on the advertised
quantization and context sizes.
The harm caused to the code editing benchmark varied from serious
to catastrophic.
One provider scored 0.5% on the benchmark with DeepSeek V2.5, a highly capable model.
Closed source, proprietary models don't typically have these issues.
They are owned and operated by the organization that created them,
and typically served with specific, predictable context window and output token limits.
Their quantization level is usually unknown, but fixed and unchanging for all users.
### Conclusions
The best versions of the Qwen model rival GPT-4o, while the worst performing
quantization is more like the older GPT-4 Turbo when served competently.
Even an otherwise excellent fp16 quantization falls to GPT-3.5 Turbo levels of performance
if run with Ollama's default 2k context window.
### Sections
{: .no_toc }
- TOC
{:toc}
## Benchmark results
{: .note :}
These are results from single benchmark runs, so expect normal variance of +/- 1-2%.
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% include quant-chart.js %}
</script>
The graph above compares 4 different versions of the Qwen 2.5 32B model,
served both locally and from cloud providers.
<input type="text" id="quantSearchInput" placeholder="Search..." style="width: 100%; max-width: 800px; margin: 10px auto; padding: 8px; display: block; border: 1px solid #ddd; border-radius: 4px;">
- The [HuggingFace weights](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) served via [glhf.chat](https://glhf.chat).
- The results from [OpenRouter's mix of providers](https://openrouter.ai/qwen/qwen-2.5-coder-32b-instruct/providers).
- Two Ollama models run locally.
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% assign quant_sorted = site.data.quant | sort: 'pass_rate_2' | reverse %}
{% for row in quant_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
The best version of the model rivals GPT-4o, while the worst performer
is more like GPT-3.5 Turbo.
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>
<script>
document.getElementById('quantSearchInput').addEventListener('keyup', function() {
var input = this.value.toLowerCase();
var rows = document.querySelectorAll('tbody tr');
rows.forEach(function(row) {
var text = row.textContent.toLowerCase();
if(text.includes(input)) {
row.style.display = '';
row.classList.add('selected');
} else {
row.style.display = 'none';
row.classList.remove('selected');
}
});
});
</script>
## Setting Ollama's context window size
[Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size),
which is very small for working with aider.
Unlike most other LLM servers, Ollama does not throw an error if you submit
a request that exceeds the context window.
Instead, it just silently truncates the request by discarding the "oldest" messages
in the chat to make it fit within the context window.
Except for the single 2k context result,
all of the Ollama results above were collected with at least an 8k context window.
An 8k window is large enough to attempt all the coding problems in the benchmark.
Aider sets Ollama's context window to 8k by default, starting in aider v0.65.0.
You can change the Ollama server's context window with a
[`.aider.model.settings.yml` file](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
like this:
```
- name: ollama/qwen2.5-coder:32b-instruct-fp16
extra_params:
num_ctx: 8192
```
## Choosing providers with OpenRouter
OpenRouter allows you to ignore specific providers in your
[preferences](https://openrouter.ai/settings/preferences).
This can be effective to exclude highly quantized or otherwise
undesirable providers.
This can be used to limit your OpenRouter requests to be
served by only your preferred providers.
## Notes
This article went through many revisions as I received feedback from
numerous members of the community.
Here are some of the noteworthy learnings and changes:
- The first version of this article included incorrect Ollama models.
- Earlier Ollama results used the too small default 2k context window,
artificially harming the benchmark results.
- The benchmark results appear to have uncovered a problem in the way
OpenRouter was communicating with Hyperbolic.
They fixed the issue 11/24/24, shortly after it was pointed out.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

After

Width:  |  Height:  |  Size: 103 KiB

File diff suppressed because it is too large Load Diff

Before

Width:  |  Height:  |  Size: 55 KiB

After

Width:  |  Height:  |  Size: 59 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 146 KiB

After

Width:  |  Height:  |  Size: 148 KiB

File diff suppressed because it is too large Load Diff

View File

@@ -86,9 +86,20 @@
## Specify a file with context window and costs for unknown models
#model-metadata-file: .aider.model.metadata.json
## Add a model alias (can be used multiple times)
#alias: xxx
## Specify multiple values like this:
#alias:
# - xxx
# - yyy
# - zzz
## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
## Timeout in seconds for API calls (default: None)
#timeout: xxx
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
@@ -265,7 +276,7 @@
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#test: false
############
@@ -368,6 +379,9 @@
## Enable/disable fancy input with history and completion (default: True)
#fancy-input: true
## Enable/disable detection and offering to add URLs to chat (default: True)
#detect-urls: true
## Specify which editor to use for the /editor command
#editor: xxx
@@ -379,3 +393,6 @@
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the input device name for voice recording
#voice-input-device: xxx

View File

@@ -90,9 +90,15 @@
## Specify a file with context window and costs for unknown models
#AIDER_MODEL_METADATA_FILE=.aider.model.metadata.json
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=
## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
## Timeout in seconds for API calls (default: None)
#AIDER_TIMEOUT=
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
@@ -264,7 +270,7 @@
## Enable/disable automatic testing after changes (default: False)
#AIDER_AUTO_TEST=false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#AIDER_TEST=false
############
@@ -351,6 +357,9 @@
## Enable/disable fancy input with history and completion (default: True)
#AIDER_FANCY_INPUT=true
## Enable/disable detection and offering to add URLs to chat (default: True)
#AIDER_DETECT_URLS=true
## Specify which editor to use for the /editor command
#AIDER_EDITOR=
@@ -362,3 +371,6 @@
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the input device name for voice recording
#AIDER_VOICE_INPUT_DEVICE=

View File

@@ -11,6 +11,13 @@ description: Configuring advanced settings for LLMs.
In most cases, you can safely ignore aider's warning about unknown context
window size and model costs.
{: .note }
Aider never *enforces* token limits, it only *reports* token limit errors
from the API provider.
You probably don't need to
configure aider with the proper token limits
for unusual models.
But, you can register context window limits and costs for models that aren't known
to aider. Create a `.aider.model.metadata.json` file in one of these locations:
@@ -497,7 +504,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: claude-3-5-sonnet-20240620
@@ -516,7 +523,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: anthropic/claude-3-5-sonnet-20240620
@@ -535,7 +542,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: anthropic/claude-3-5-sonnet-20241022
@@ -554,7 +561,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
@@ -573,7 +580,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: anthropic/claude-3-5-sonnet-latest
@@ -592,7 +599,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
lazy: false
name: claude-3-5-sonnet-20241022
@@ -611,7 +618,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
lazy: false
name: anthropic/claude-3-haiku-20240307
reminder: user
@@ -629,7 +636,7 @@ cog.out("```\n")
examples_as_sys_msg: false
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
lazy: false
name: anthropic/claude-3-5-haiku-20241022
reminder: user
@@ -647,7 +654,7 @@ cog.out("```\n")
examples_as_sys_msg: false
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
lazy: false
name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
reminder: user
@@ -665,7 +672,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
lazy: false
name: claude-3-5-haiku-20241022
reminder: user
@@ -700,7 +707,7 @@ cog.out("```\n")
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
lazy: false
name: claude-3-haiku-20240307
reminder: user
@@ -1209,7 +1216,7 @@ cog.out("```\n")
name: openrouter/openai/o1-mini
reminder: user
send_undo_reply: false
streaming: true
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
@@ -1225,7 +1232,7 @@ cog.out("```\n")
name: openrouter/openai/o1-preview
reminder: user
send_undo_reply: false
streaming: true
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false

View File

@@ -142,9 +142,20 @@ cog.outl("```")
## Specify a file with context window and costs for unknown models
#model-metadata-file: .aider.model.metadata.json
## Add a model alias (can be used multiple times)
#alias: xxx
## Specify multiple values like this:
#alias:
# - xxx
# - yyy
# - zzz
## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
## Timeout in seconds for API calls (default: None)
#timeout: xxx
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
@@ -321,7 +332,7 @@ cog.outl("```")
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#test: false
############
@@ -424,6 +435,9 @@ cog.outl("```")
## Enable/disable fancy input with history and completion (default: True)
#fancy-input: true
## Enable/disable detection and offering to add URLs to chat (default: True)
#detect-urls: true
## Specify which editor to use for the /editor command
#editor: xxx
@@ -435,5 +449,8 @@ cog.outl("```")
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the input device name for voice recording
#voice-input-device: xxx
```
<!--[[[end]]]-->

View File

@@ -132,9 +132,15 @@ cog.outl("```")
## Specify a file with context window and costs for unknown models
#AIDER_MODEL_METADATA_FILE=.aider.model.metadata.json
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=
## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
## Timeout in seconds for API calls (default: None)
#AIDER_TIMEOUT=
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
@@ -306,7 +312,7 @@ cog.outl("```")
## Enable/disable automatic testing after changes (default: False)
#AIDER_AUTO_TEST=false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#AIDER_TEST=false
############
@@ -393,6 +399,9 @@ cog.outl("```")
## Enable/disable fancy input with history and completion (default: True)
#AIDER_FANCY_INPUT=true
## Enable/disable detection and offering to add URLs to chat (default: True)
#AIDER_DETECT_URLS=true
## Specify which editor to use for the /editor command
#AIDER_EDITOR=
@@ -404,7 +413,8 @@ cog.outl("```")
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the input device name for voice recording
#AIDER_VOICE_INPUT_DEVICE=
```
<!--[[[end]]]-->

View File

@@ -0,0 +1,72 @@
---
parent: Configuration
nav_order: 1000
description: Assign convenient short names to models.
---
# Model Aliases
Model aliases allow you to create shorthand names for models you frequently use. This is particularly useful for models with long names or when you want to standardize model usage across your team.
## Command Line Usage
You can define aliases when launching aider using the `--alias` option:
```bash
aider --alias "fast:gpt-3.5-turbo" --alias "smart:gpt-4"
```
Multiple aliases can be defined by using the `--alias` option multiple times. Each alias definition should be in the format `alias:model-name`.
## Configuration File
You can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/docs/config/aider_conf.html):
```yaml
alias:
- "fast:gpt-3.5-turbo"
- "smart:gpt-4"
- "hacker:claude-3-sonnet-20240229"
```
## Using Aliases
Once defined, you can use the alias instead of the full model name:
```bash
aider --model fast # Uses gpt-3.5-turbo
aider --model smart # Uses gpt-4
```
## Built-in Aliases
Aider includes some built-in aliases for convenience:
<!--[[[cog
import cog
from aider.models import MODEL_ALIASES
for alias, model in sorted(MODEL_ALIASES.items()):
cog.outl(f"- `{alias}`: {model}")
]]]-->
- `3`: gpt-3.5-turbo
- `35-turbo`: gpt-3.5-turbo
- `35turbo`: gpt-3.5-turbo
- `4`: gpt-4-0613
- `4-turbo`: gpt-4-1106-preview
- `4o`: gpt-4o-2024-08-06
- `deepseek`: deepseek/deepseek-coder
- `haiku`: claude-3-5-haiku-20241022
- `opus`: claude-3-opus-20240229
- `sonnet`: claude-3-5-sonnet-20241022
<!--[[[end]]]-->
## Priority
If the same alias is defined in multiple places, the priority is:
1. Command line aliases (highest priority)
2. Configuration file aliases
3. Built-in aliases (lowest priority)
This allows you to override built-in aliases with your own preferences.

View File

@@ -32,9 +32,9 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--openai-api-type] [--openai-api-version]
[--openai-api-deployment-id] [--openai-organization-id]
[--model-settings-file] [--model-metadata-file]
[--verify-ssl | --no-verify-ssl] [--edit-format]
[--architect] [--weak-model] [--editor-model]
[--editor-edit-format]
[--alias] [--verify-ssl | --no-verify-ssl] [--timeout]
[--edit-format] [--architect] [--weak-model]
[--editor-model] [--editor-edit-format]
[--show-model-warnings | --no-show-model-warnings]
[--max-chat-history-tokens] [--env-file]
[--cache-prompts | --no-cache-prompts]
@@ -73,8 +73,10 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--message-file] [--load] [--encoding] [-c]
[--gui | --no-gui | --browser | --no-browser]
[--suggest-shell-commands | --no-suggest-shell-commands]
[--fancy-input | --no-fancy-input] [--editor]
[--fancy-input | --no-fancy-input]
[--detect-urls | --no-detect-urls] [--editor]
[--voice-format] [--voice-language]
[--voice-input-device]
```
@@ -191,6 +193,10 @@ Specify a file with context window and costs for unknown models
Default: .aider.model.metadata.json
Environment variable: `AIDER_MODEL_METADATA_FILE`
### `--alias ALIAS:MODEL`
Add a model alias (can be used multiple times)
Environment variable: `AIDER_ALIAS`
### `--verify-ssl`
Verify the SSL cert when connecting to models (default: True)
Default: True
@@ -199,6 +205,10 @@ Aliases:
- `--verify-ssl`
- `--no-verify-ssl`
### `--timeout VALUE`
Timeout in seconds for API calls (default: None)
Environment variable: `AIDER_TIMEOUT`
### `--edit-format EDIT_FORMAT`
Specify what edit format the LLM should use (default depends on model)
Environment variable: `AIDER_EDIT_FORMAT`
@@ -504,7 +514,7 @@ Aliases:
- `--no-auto-test`
### `--test`
Run tests and fix problems found
Run tests, fix problems found and then exit
Default: False
Environment variable: `AIDER_TEST`
@@ -673,6 +683,14 @@ Aliases:
- `--fancy-input`
- `--no-fancy-input`
### `--detect-urls`
Enable/disable detection and offering to add URLs to chat (default: True)
Default: True
Environment variable: `AIDER_DETECT_URLS`
Aliases:
- `--detect-urls`
- `--no-detect-urls`
### `--editor VALUE`
Specify which editor to use for the /editor command
Environment variable: `AIDER_EDITOR`
@@ -688,4 +706,8 @@ Environment variable: `AIDER_VOICE_FORMAT`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
### `--voice-input-device VOICE_INPUT_DEVICE`
Specify the input device name for voice recording
Environment variable: `AIDER_VOICE_INPUT_DEVICE`
<!--[[[end]]]-->

View File

@@ -169,7 +169,10 @@ python -m aider
## Can I change the system prompts that aider uses?
Aider is set up to support different system prompts and edit formats
The most convenient way to add custom instructions is to use a
[conventions file](https://aider.chat/docs/usage/conventions.html).
But, aider is set up to support different actual system prompts and edit formats
in a modular way. If you look in the `aider/coders` subdirectory, you'll
see there's a base coder with base prompts, and then there are
a number of
@@ -210,13 +213,25 @@ You can also refer to the
## How are the "aider wrote xx% of code" stats computed?
[Aider is tightly integrated with git](/docs/git.html) so all
one of aider's code changes are committed to the repo with proper attribution.
of aider's code changes are committed to the repo with proper attribution.
The
[stats are computed](https://github.com/Aider-AI/aider/blob/main/scripts/blame.py)
by doing something like `git blame` on the repo,
and counting up who wrote all the new lines of code in each release.
Only lines in source code files are counted, not documentation or prompt files.
## Why is the LLM speaking to me in an unexpected language?
Aider goes to some effort to prompt the model to use the language that is configured
for your system.
But LLMs aren't fully reliable, and they sometimes decide to speak in
an unexpected language.
Claude is especially fond of speaking French.
You can explicitly set the language that aider tells the model to use with
`--chat-language <language>`.
But the LLM may not comply.
## Can I share my aider chat transcript?
Yes, you can now share aider chat logs in a pretty way.

View File

@@ -53,6 +53,7 @@ Installing PortAudio is completely optional, but can usually be accomplished lik
- For Windows, there is no need to install PortAudio.
- For Mac, do `brew install portaudio`
- For Linux, do `sudo apt-get install libportaudio2`
- Some linux environments may also need `sudo apt install libasound2-plugins`
## Add aider to your editor

View File

@@ -181,6 +181,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
November 21, 2024.
November 28, 2024.
<!--[[[end]]]-->
</p>

View File

@@ -1,22 +0,0 @@
---
parent: Connecting to LLMs
nav_order: 850
---
# Editing format
Aider uses different "edit formats" to collect code edits from different LLMs.
The "whole" format is the easiest for an LLM to use, but it uses a lot of tokens
and may limit how large a file can be edited.
Models which can use one of the diff formats are much more efficient,
using far fewer tokens.
Models that use a diff-like format are able to
edit larger files with less cost and without hitting token limits.
Aider is configured to use the best edit format for the popular OpenAI and Anthropic models
and the [other models recommended on the LLM page](https://aider.chat/docs/llms.html).
For lesser known models aider will default to using the "whole" editing format
since it is the easiest format for an LLM to use.
If you would like to experiment with the more advanced formats, you can
use these switches: `--edit-format diff` or `--edit-format udiff`.

View File

@@ -0,0 +1,26 @@
---
parent: Connecting to LLMs
nav_order: 400
---
# LM Studio
To use LM Studio:
```
python -m pip install -U aider-chat
export LM_STUDIO_API_KEY=<key> # Mac/Linux
setx LM_STUDIO_API_KEY <key> # Windows, restart shell after setx
export LM_STUDIO_API_BASE=<url> # Mac/Linux
setx LM_STUDIO_API_BASE <url> # Windows, restart shell after setx
aider --model lm_studio/<your-model-name>
```
See the [model warnings](warnings.html)
section for information on warnings which will occur
when working with models that aider is not familiar with.

View File

@@ -20,24 +20,49 @@ python -m pip install -U aider-chat
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
aider --model ollama/<model>
aider --model ollama_chat/<model>
```
In particular, `llama3:70b` works well with aider:
{: .note }
Using `ollama_chat/` is recommended over `ollama/`.
```
ollama pull llama3:70b
ollama serve
# In another terminal window...
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
aider --model ollama/llama3:70b
```
See the [model warnings](warnings.html)
section for information on warnings which will occur
when working with models that aider is not familiar with.
## API Key
If you are using an ollama that requires an API key you can set `OLLAMA_API_KEY`:
```
export OLLAMA_API_KEY=<api-key> # Mac/Linux
setx OLLAMA_API_KEY <api-key> # Windows, restart shell after setx
```
## Setting the context window size
[Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size),
which is very small for working with aider.
Aider sets Ollama's context window to 8k by default.
If you would like
a larger context window
you can use a
[`.aider.model.settings.yml` file](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
like this:
```
- name: ollama/qwen2.5-coder:32b-instruct-fp16
extra_params:
num_ctx: 8192
```
Unlike most other LLM servers, Ollama does not throw an error if you submit
a request that exceeds the context window.
Instead, it just silently truncates the request by discarding the "oldest" messages
in the chat to make it fit within the context window.
So if your context window is too small, you won't get an error.
Aider will probably just fail to work well and experience
a lot of
[file editing problems](https://aider.chat/docs/troubleshooting/edit-errors.html).

View File

@@ -15,10 +15,10 @@ python -m pip install -U aider-chat
export XAI_API_KEY=<key> # Mac/Linux
setx XAI_API_KEY <key> # Windows, restart shell after setx
aider --model xai/groq-beta
aider --model xai/grok-beta
# List models available from xAI
aider --list-models groq/
aider --list-models xai/
```

View File

@@ -96,5 +96,5 @@ coder = Coder.create(model=model, fnames=fnames, io=io)
```
{: .note }
The scripting API is not officially supported or documented and may
change without warning.
The python scripting API is not officially supported or documented,
and could change in future releases without providing backwards compatibility.

View File

@@ -42,7 +42,14 @@ disobeying the system prompt instructions.
Most local models are just barely capable of working with aider,
so editing errors are probably unavoidable.
Local models which have been quantized are even more likely to have problems
## Local models: context window and quantization
Be especially careful about the
[Ollama context window](https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size)
when working with local models.
It defaults to be very small and silently discards data if you exceed it.
Local models which have been quantized are more likely to have editing problems
because they are not capable enough to follow aider's system prompts.
## Try the whole edit format

View File

@@ -33,6 +33,13 @@ To reduce output tokens:
For more info: https://aider.chat/docs/token-limits.html
```
{: .note }
Aider never *enforces* token limits, it only *reports* token limit errors
from the API provider.
You probably don't need to
[configure aider with the proper token limits](https://aider.chat/docs/config/adv-model-settings.html#context-window-size-and-token-costs)
for unusual models.
## Input tokens & context window size
The most common problem is trying to send too much data to a

View File

@@ -77,8 +77,10 @@ The interactive prompt is built with [prompt-toolkit](https://github.com/prompt-
### Emacs
- `Up Arrow` : Scroll back through previously sent messages.
- `Down Arrow` : Scroll forward through previously sent messages.
- `Up Arrow` : Move up one line in the current message.
- `Down Arrow` : Move down one line in the current message.
- `Ctrl-Up` : Scroll back through previously sent messages.
- `Ctrl-Down` : Scroll forward through previously sent messages.
- `Ctrl-A` : Move cursor to the start of the line.
- `Ctrl-B` : Move cursor back one character.
- `Ctrl-D` : Delete the character under the cursor.
@@ -95,8 +97,10 @@ The interactive prompt is built with [prompt-toolkit](https://github.com/prompt-
To use vi/vim keybindings, run aider with the `--vim` switch.
- `Up Arrow` : Scroll back through previously sent messages.
- `Down Arrow` : Scroll forward through previously sent messages.
- `Up Arrow` : Move up one line in the current message.
- `Down Arrow` : Move down one line in the current message.
- `Ctrl-Up` : Scroll back through previously sent messages.
- `Ctrl-Down` : Scroll forward through previously sent messages.
- `Esc` : Switch to command mode.
- `i` : Switch to insert mode.
- `a` : Move cursor one character to the right and switch to insert mode.

View File

@@ -1,7 +1,7 @@
---
parent: Usage
nav_order: 60
description: Using the chat, ask and help chat modes.
description: Using the code, architect, ask and help chat modes.
---
# Chat modes

View File

@@ -47,10 +47,44 @@ def find_latest_benchmark_dir():
print("Error: No benchmark directories found under tmp.benchmarks.")
sys.exit(1)
latest_dir = max(
benchmark_dirs,
key=lambda d: next((f.stat().st_mtime for f in d.rglob("*.md") if f.is_file()), 0),
)
# Get current time and 24 hours ago
now = datetime.datetime.now()
day_ago = now - datetime.timedelta(days=1)
# Filter directories by name pattern YYYY-MM-DD-HH-MM-SS--
recent_dirs = []
for d in benchmark_dirs:
try:
# Extract datetime from directory name
date_str = d.name[:19] # Takes YYYY-MM-DD-HH-MM-SS
dir_date = datetime.datetime.strptime(date_str, "%Y-%m-%d-%H-%M-%S")
if dir_date >= day_ago:
recent_dirs.append(d)
except ValueError:
# Skip directories that don't match the expected format
continue
if not recent_dirs:
print("Error: No benchmark directories found from the last 24 hours.")
sys.exit(1)
# Find directory with most recently modified .md file
latest_dir = None
latest_time = 0
for d in recent_dirs:
# Look for .md files in subdirectories
for md_file in d.glob("*/.*.md"):
if md_file.is_file():
mtime = md_file.stat().st_mtime
if mtime > latest_time:
latest_time = mtime
latest_dir = d
if not latest_dir:
print("Error: No .md files found in recent benchmark directories.")
sys.exit(1)
print(f"Using the most recently updated benchmark directory: {latest_dir.name}")
return latest_dir
@@ -155,6 +189,9 @@ def main(
tries: int = typer.Option(2, "--tries", "-r", help="Number of tries for running tests"),
threads: int = typer.Option(1, "--threads", "-t", help="Number of threads to run in parallel"),
num_tests: int = typer.Option(-1, "--num-tests", "-n", help="Number of tests to run"),
num_ctx: Optional[int] = typer.Option(
None, "--num-ctx", help="Override model context window size"
),
exercises_dir: str = typer.Option(
EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files"
),
@@ -247,6 +284,7 @@ def main(
max_apply_update_errors,
editor_model,
editor_edit_format,
num_ctx,
)
all_results.append(results)
@@ -526,6 +564,7 @@ def run_test_real(
max_apply_update_errors,
editor_model,
editor_edit_format,
num_ctx=None,
):
if not os.path.isdir(testdir):
print("Not a dir:", testdir)
@@ -588,6 +627,11 @@ def run_test_real(
editor_model=editor_model,
editor_edit_format=editor_edit_format,
)
if num_ctx:
if not main_model.extra_params:
main_model.extra_params = {}
main_model.extra_params["num_ctx"] = num_ctx
edit_format = edit_format or main_model.edit_format
dump(main_model)

View File

@@ -2,6 +2,7 @@
docker run \
-it --rm \
--add-host=host.docker.internal:host-gateway \
-v `pwd`:/aider \
-v `pwd`/tmp.benchmarks/.:/benchmarks \
-e OPENAI_API_KEY=$OPENAI_API_KEY \

View File

@@ -1,278 +1,168 @@
from dataclasses import dataclass
from datetime import date
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import yaml
from imgcat import imgcat
from matplotlib import rc
from aider.dump import dump # noqa: 401
LABEL_FONT_SIZE = 16 # Font size for scatter plot dot labels
@dataclass
class ModelData:
name: str
release_date: date
pass_rate: float
@property
def color(self) -> str:
model = self.name.lower()
if "gemini" in model and "pro" in model:
return "magenta"
if "qwen" in model:
return "darkblue"
if "mistral" in model:
return "cyan"
if "haiku" in model:
return "pink"
if "deepseek" in model:
return "brown"
if "sonnet" in model:
return "orange"
if "-4o" in model:
return "purple"
if "gpt-4" in model:
return "red"
if "gpt-3.5" in model:
return "green"
return "lightblue"
@property
def legend_label(self) -> str:
model = self.name.lower()
if "gemini" in model and "pro" in model:
return "Gemini 1.5 Pro"
if "claude-3-sonnet" in model:
return "Sonnet"
if "o1-preview" in model:
return "O1 Preview"
if "gpt-3.5" in model:
return "GPT-3.5 Turbo"
if "gpt-4-" in model and "-4o" not in model:
return "GPT-4"
if "qwen" in model:
return "Qwen"
if "-4o" in model:
return "GPT-4o"
if "haiku" in model:
return "Haiku"
if "deepseek" in model:
return "DeepSeek"
if "mistral" in model:
return "Mistral"
return model
def get_legend_label(model):
model = model.lower()
if "claude-3-sonnet" in model:
return "Sonnet"
if "o1-preview" in model:
return "O1 Preview"
if "gpt-3.5" in model:
return "GPT-3.5 Turbo"
if "gpt-4-" in model and "-4o" not in model:
return "GPT-4"
if "qwen" in model:
return "Qwen"
if "-4o" in model:
return "GPT-4o"
if "haiku" in model:
return "Haiku"
if "deepseek" in model:
return "DeepSeek"
if "mistral" in model:
return "Mistral"
if "o1-preview" in model:
return "o1-preview"
return model
class BenchmarkPlotter:
LABEL_FONT_SIZE = 16
def __init__(self):
self.setup_plot_style()
def get_model_color(model):
default = "lightblue"
def setup_plot_style(self):
plt.rcParams["hatch.linewidth"] = 0.5
plt.rcParams["hatch.color"] = "#444444"
rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 10})
plt.rcParams["text.color"] = "#444444"
if model == "gpt-4o-mini":
return default
def load_data(self, yaml_file: str) -> List[ModelData]:
with open(yaml_file, "r") as file:
data = yaml.safe_load(file)
if "qwen" in model.lower():
return "darkblue"
models = []
for entry in data:
if "released" in entry and "pass_rate_2" in entry:
model = ModelData(
name=entry["model"].split("(")[0].strip(),
release_date=entry["released"],
pass_rate=entry["pass_rate_2"],
)
models.append(model)
return models
if "mistral" in model.lower():
return "cyan"
def create_figure(self) -> Tuple[plt.Figure, plt.Axes]:
fig, ax = plt.subplots(figsize=(12, 8))
ax.grid(axis="y", zorder=0, lw=0.2)
for spine in ax.spines.values():
spine.set_edgecolor("#DDDDDD")
spine.set_linewidth(0.5)
return fig, ax
if "haiku" in model.lower():
return "pink"
def plot_model_series(self, ax: plt.Axes, models: List[ModelData]):
# Group models by color
color_groups: Dict[str, List[ModelData]] = {}
for model in models:
if model.color not in color_groups:
color_groups[model.color] = []
color_groups[model.color].append(model)
if "deepseek" in model.lower():
return "brown"
# Plot each color group
for color, group in color_groups.items():
sorted_group = sorted(group, key=lambda x: x.release_date)
dates = [m.release_date for m in sorted_group]
rates = [m.pass_rate for m in sorted_group]
if "sonnet" in model.lower():
return "orange"
# Plot line
ax.plot(dates, rates, c=color, alpha=0.5, linewidth=1)
if "-4o" in model:
return "purple"
# Plot points
ax.scatter(dates, rates, c=color, alpha=0.5, s=120)
if "gpt-4" in model:
return "red"
# Add label for first point
first_model = sorted_group[0]
ax.annotate(
first_model.legend_label,
(first_model.release_date, first_model.pass_rate),
xytext=(10, 5),
textcoords="offset points",
color=color,
alpha=0.8,
fontsize=self.LABEL_FONT_SIZE,
)
if "gpt-3.5" in model:
return "green"
return default
def plot_over_time(yaml_file):
with open(yaml_file, "r") as file:
data = yaml.safe_load(file)
dates = []
pass_rates = []
models = []
print("Debug: Raw data from YAML file:")
print(data)
for entry in data:
if "released" in entry and "pass_rate_2" in entry:
dates.append(entry["released"])
pass_rates.append(entry["pass_rate_2"])
models.append(entry["model"].split("(")[0].strip())
print("Debug: Processed data:")
print("Dates:", dates)
print("Pass rates:", pass_rates)
print("Models:", models)
if not dates or not pass_rates:
print(
"Error: No data to plot. Check if the YAML file is empty or if the data is in the"
" expected format."
def set_labels_and_style(self, ax: plt.Axes):
ax.set_xlabel("Model release date", fontsize=18, color="#555")
ax.set_ylabel(
"Aider code editing benchmark,\npercent completed correctly", fontsize=18, color="#555"
)
return
ax.set_title("LLM code editing skill by model release date", fontsize=20)
ax.set_ylim(30, 90)
plt.xticks(fontsize=14, rotation=45, ha="right")
plt.tight_layout(pad=1.0)
plt.rcParams["hatch.linewidth"] = 0.5
plt.rcParams["hatch.color"] = "#444444"
def save_and_display(self, fig: plt.Figure):
plt.savefig("aider/website/assets/models-over-time.png")
plt.savefig("aider/website/assets/models-over-time.svg")
imgcat(fig)
rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 10})
plt.rcParams["text.color"] = "#444444"
fig, ax = plt.subplots(figsize=(12, 8)) # Make figure square
print("Debug: Figure created. Plotting data...")
ax.grid(axis="y", zorder=0, lw=0.2)
for spine in ax.spines.values():
spine.set_edgecolor("#DDDDDD")
spine.set_linewidth(0.5)
colors = [get_model_color(model) for model in models]
# Separate data points by color
purple_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "purple"]
red_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "red"]
green_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "green"]
orange_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "orange"]
brown_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "brown"]
pink_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "pink"]
qwen_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "darkblue"]
mistral_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "cyan"]
# Create a mapping of colors to first points and labels
color_to_first_point = {}
color_to_label = {}
for date, rate, color, model in sorted(zip(dates, pass_rates, colors, models)):
if color not in color_to_first_point:
color_to_first_point[color] = (date, rate)
color_to_label[color] = get_legend_label(model)
# Plot lines and add labels at first points
if purple_points:
purple_dates, purple_rates = zip(*sorted(purple_points))
ax.plot(purple_dates, purple_rates, c="purple", alpha=0.5, linewidth=1)
if "purple" in color_to_first_point:
date, rate = color_to_first_point["purple"]
ax.annotate(
color_to_label["purple"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="purple",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if red_points:
red_dates, red_rates = zip(*sorted(red_points))
ax.plot(red_dates, red_rates, c="red", alpha=0.5, linewidth=1)
if "red" in color_to_first_point:
date, rate = color_to_first_point["red"]
ax.annotate(
color_to_label["red"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="red",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if green_points:
green_dates, green_rates = zip(*sorted(green_points))
ax.plot(green_dates, green_rates, c="green", alpha=0.5, linewidth=1)
if "green" in color_to_first_point:
date, rate = color_to_first_point["green"]
ax.annotate(
color_to_label["green"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="green",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if orange_points:
orange_dates, orange_rates = zip(*sorted(orange_points))
ax.plot(orange_dates, orange_rates, c="orange", alpha=0.5, linewidth=1)
if "orange" in color_to_first_point:
date, rate = color_to_first_point["orange"]
ax.annotate(
color_to_label["orange"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="orange",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if brown_points:
brown_dates, brown_rates = zip(*sorted(brown_points))
ax.plot(brown_dates, brown_rates, c="brown", alpha=0.5, linewidth=1)
if "brown" in color_to_first_point:
date, rate = color_to_first_point["brown"]
ax.annotate(
color_to_label["brown"],
(date, rate),
xytext=(10, -10),
textcoords="offset points",
color="brown",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if pink_points:
pink_dates, pink_rates = zip(*sorted(pink_points))
ax.plot(pink_dates, pink_rates, c="pink", alpha=0.5, linewidth=1)
if "pink" in color_to_first_point:
date, rate = color_to_first_point["pink"]
ax.annotate(
color_to_label["pink"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="pink",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if qwen_points:
qwen_dates, qwen_rates = zip(*sorted(qwen_points))
ax.plot(qwen_dates, qwen_rates, c="darkblue", alpha=0.5, linewidth=1)
if "darkblue" in color_to_first_point:
date, rate = color_to_first_point["darkblue"]
ax.annotate(
color_to_label["darkblue"],
(date, rate),
xytext=(10, 5),
textcoords="offset points",
color="darkblue",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
if mistral_points:
mistral_dates, mistral_rates = zip(*sorted(mistral_points))
ax.plot(mistral_dates, mistral_rates, c="cyan", alpha=0.5, linewidth=1)
if "cyan" in color_to_first_point:
date, rate = color_to_first_point["cyan"]
ax.annotate(
color_to_label["cyan"],
(date, rate),
xytext=(10, -10),
textcoords="offset points",
color="cyan",
alpha=0.8,
fontsize=LABEL_FONT_SIZE,
)
# Plot points without legend
for date, rate, color in zip(dates, pass_rates, colors):
ax.scatter([date], [rate], c=[color], alpha=0.5, s=120)
ax.set_xlabel("Model release date", fontsize=18, color="#555")
ax.set_ylabel(
"Aider code editing benchmark,\npercent completed correctly", fontsize=18, color="#555"
)
ax.set_title("LLM code editing skill by model release date", fontsize=20)
ax.set_ylim(30, 90) # Adjust y-axis limit to accommodate higher values
plt.xticks(fontsize=14, rotation=45, ha="right") # Rotate x-axis labels for better readability
plt.tight_layout(pad=1.0) # Adjust layout since we don't need room for legend anymore
print("Debug: Saving figures...")
plt.savefig("tmp_over_time.png")
plt.savefig("tmp_over_time.svg")
print("Debug: Displaying figure with imgcat...")
imgcat(fig)
print("Debug: Figure generation complete.")
def plot(self, yaml_file: str):
models = self.load_data(yaml_file)
fig, ax = self.create_figure()
self.plot_model_series(ax, models)
self.set_labels_and_style(ax)
self.save_and_display(fig)
# Example usage
plot_over_time("aider/website/_data/edit_leaderboard.yml")
def main():
plotter = BenchmarkPlotter()
models = plotter.load_data("aider/website/_data/edit_leaderboard.yml")
# Print release dates and model names
for model in sorted(models, key=lambda x: x.release_date):
print(f"{model.release_date}: {model.name}")
plotter.plot("aider/website/_data/edit_leaderboard.yml")
if __name__ == "__main__":
main()

View File

@@ -67,7 +67,7 @@ requires = ["setuptools>=68", "setuptools_scm[toml]>=8"]
build-backend = "setuptools.build_meta"
[tool.setuptools_scm]
write_to = "aider/__version__.py"
write_to = "aider/_version.py"
[tool.codespell]
skip = "*.svg,Gemfile.lock"

View File

@@ -6,5 +6,7 @@ testpaths =
tests/help
tests/browser
tests/scrape
env =
AIDER_ANALYTICS=false

View File

@@ -6,7 +6,7 @@
#
aiohappyeyeballs==2.4.3
# via aiohttp
aiohttp==3.11.2
aiohttp==3.11.7
# via litellm
aiosignal==1.3.1
# via aiohttp
@@ -62,11 +62,11 @@ gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via -r requirements/requirements.in
grep-ast==0.3.3
grep-ast==0.4.1
# via -r requirements/requirements.in
h11==0.14.0
# via httpcore
httpcore==1.0.6
httpcore==1.0.7
# via httpx
httpx==0.27.2
# via openai
@@ -86,9 +86,9 @@ importlib-resources==6.4.5
# via -r requirements/requirements.in
jinja2==3.1.4
# via litellm
jiter==0.7.1
jiter==0.8.0
# via openai
json5==0.9.28
json5==0.10.0
# via -r requirements/requirements.in
jsonschema==4.23.0
# via
@@ -96,7 +96,7 @@ jsonschema==4.23.0
# litellm
jsonschema-specifications==2024.10.1
# via jsonschema
litellm==1.52.8
litellm==1.52.16
# via -r requirements/requirements.in
markdown-it-py==3.0.0
# via rich
@@ -120,7 +120,7 @@ numpy==1.26.4
# via
# -r requirements/requirements.in
# scipy
openai==1.54.4
openai==1.55.2
# via litellm
packaging==24.2
# via
@@ -134,7 +134,7 @@ pexpect==4.9.0
# via -r requirements/requirements.in
pillow==10.4.0
# via -r requirements/requirements.in
posthog==3.7.0
posthog==3.7.3
# via -r requirements/requirements.in
prompt-toolkit==3.0.48
# via -r requirements/requirements.in
@@ -150,11 +150,11 @@ pycodestyle==2.12.1
# via flake8
pycparser==2.22
# via cffi
pydantic==2.9.2
pydantic==2.10.2
# via
# litellm
# openai
pydantic-core==2.23.4
pydantic-core==2.27.1
# via pydantic
pydub==0.25.1
# via -r requirements/requirements.in
@@ -219,7 +219,7 @@ tokenizers==0.19.1
# via
# -r requirements/requirements.in
# litellm
tqdm==4.67.0
tqdm==4.67.1
# via
# huggingface-hub
# openai
@@ -241,7 +241,7 @@ urllib3==2.2.3
# requests
wcwidth==0.2.13
# via prompt-toolkit
yarl==1.17.1
yarl==1.18.0
# via aiohttp
zipp==3.21.0
# via importlib-metadata

View File

@@ -4,12 +4,12 @@
#
# pip-compile --constraint=requirements.txt --constraint=requirements/requirements-dev.txt --constraint=requirements/requirements-help.txt --output-file=requirements/requirements-browser.txt requirements/requirements-browser.in
#
altair==5.4.1
altair==5.5.0
# via streamlit
attrs==24.2.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-help.txt
# jsonschema
# referencing
@@ -19,85 +19,85 @@ cachetools==5.5.0
# via streamlit
certifi==2024.8.30
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
charset-normalizer==3.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
click==8.1.7
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# streamlit
gitdb==4.0.11
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# gitpython
gitpython==3.1.43
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# streamlit
idna==3.10
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
jinja2==3.1.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# altair
# pydeck
jsonschema==4.23.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# altair
jsonschema-specifications==2024.10.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# jsonschema
markdown-it-py==3.0.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# rich
markupsafe==3.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# jinja2
mdurl==0.1.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# markdown-it-py
narwhals==1.13.5
narwhals==1.14.2
# via altair
numpy==1.26.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# pandas
@@ -105,8 +105,8 @@ numpy==1.26.4
# streamlit
packaging==24.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# altair
@@ -117,27 +117,27 @@ pandas==2.2.3
# streamlit
pillow==10.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# streamlit
protobuf==5.28.3
# via streamlit
pyarrow==18.0.0
pyarrow==18.1.0
# via streamlit
pydeck==0.9.1
# via streamlit
pygments==2.18.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# rich
python-dateutil==2.9.0.post0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# pandas
pytz==2024.2
@@ -146,41 +146,41 @@ pytz==2024.2
# pandas
referencing==0.35.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# jsonschema
# jsonschema-specifications
requests==2.32.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# streamlit
rich==13.9.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# streamlit
rpds-py==0.21.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# jsonschema
# referencing
six==1.16.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# python-dateutil
smmap==5.0.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# gitdb
streamlit==1.40.1
streamlit==1.40.2
# via -r requirements/requirements-browser.in
tenacity==8.5.0
# via
@@ -188,12 +188,12 @@ tenacity==8.5.0
# streamlit
toml==0.10.2
# via streamlit
tornado==6.4.1
tornado==6.4.2
# via streamlit
typing-extensions==4.12.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# altair
@@ -204,8 +204,8 @@ tzdata==2024.2
# pandas
urllib3==2.2.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests

View File

@@ -3,6 +3,7 @@
# pip-compile --output-file=requirements-dev.txt requirements-dev.in --upgrade
#
pytest
pytest-env
pip-tools
lox
matplotlib

View File

@@ -12,20 +12,20 @@ build==1.2.2.post1
# via pip-tools
certifi==2024.8.30
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# requests
cfgv==3.4.0
# via pre-commit
charset-normalizer==3.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# requests
click==8.1.7
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# pip-tools
# typer
codespell==2.3.0
@@ -48,28 +48,28 @@ docutils==0.21.2
# sphinx-rtd-theme
filelock==3.16.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# virtualenv
fonttools==4.55.0
# via matplotlib
identify==2.6.2
identify==2.6.3
# via pre-commit
idna==3.10
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# requests
imagesize==1.4.1
# via sphinx
imgcat==0.5.0
imgcat==0.6.0
# via -r requirements/requirements-dev.in
iniconfig==2.0.0
# via pytest
jinja2==3.1.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# sphinx
kiwisolver==1.4.7
# via matplotlib
@@ -77,20 +77,20 @@ lox==0.12.0
# via -r requirements/requirements-dev.in
markdown-it-py==3.0.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# rich
markupsafe==3.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# jinja2
matplotlib==3.9.2
# via -r requirements/requirements-dev.in
mdurl==0.1.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# markdown-it-py
multiprocess==0.70.17
# via pathos
@@ -98,15 +98,15 @@ nodeenv==1.9.1
# via pre-commit
numpy==1.26.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# contourpy
# matplotlib
# pandas
packaging==24.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# build
# matplotlib
# pytest
@@ -117,8 +117,8 @@ pathos==0.3.3
# via lox
pillow==10.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# matplotlib
pip-tools==7.4.1
# via -r requirements/requirements-dev.in
@@ -134,8 +134,8 @@ pre-commit==4.0.1
# via -r requirements/requirements-dev.in
pygments==2.18.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# rich
# sphinx
pyparsing==3.2.0
@@ -145,29 +145,33 @@ pyproject-hooks==1.2.0
# build
# pip-tools
pytest==8.3.3
# via
# -r requirements/requirements-dev.in
# pytest-env
pytest-env==1.1.5
# via -r requirements/requirements-dev.in
python-dateutil==2.9.0.post0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# matplotlib
# pandas
pytz==2024.2
# via pandas
pyyaml==6.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# pre-commit
requests==2.32.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# sphinx
rich==13.9.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# typer
semver==3.0.2
# via -r requirements/requirements-dev.in
@@ -175,8 +179,8 @@ shellingham==1.5.4
# via typer
six==1.16.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# python-dateutil
snowballstemmer==2.2.0
# via sphinx
@@ -200,23 +204,23 @@ sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
typer==0.13.0
typer==0.13.1
# via -r requirements/requirements-dev.in
typing-extensions==4.12.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# typer
tzdata==2024.2
# via pandas
urllib3==2.2.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# requests
virtualenv==20.27.1
virtualenv==20.28.0
# via pre-commit
wheel==0.45.0
wheel==0.45.1
# via pip-tools
# The following packages are considered to be unsafe in a requirements file:

View File

@@ -6,65 +6,65 @@
#
aiohappyeyeballs==2.4.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
aiohttp==3.11.2
aiohttp==3.11.7
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# huggingface-hub
# llama-index-core
aiosignal==1.3.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
annotated-types==0.7.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# pydantic
anyio==4.6.2.post1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# httpx
attrs==24.2.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
certifi==2024.8.30
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# httpcore
# httpx
# requests
charset-normalizer==3.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# requests
click==8.1.7
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# nltk
dataclasses-json==0.6.7
# via llama-index-core
deprecated==1.2.14
deprecated==1.2.15
# via llama-index-core
dirtyjson==1.0.8
# via llama-index-core
filelock==3.16.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# huggingface-hub
# torch
@@ -73,14 +73,14 @@ filetype==1.2.0
# via llama-index-core
frozenlist==1.5.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
# aiosignal
fsspec==2024.10.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# huggingface-hub
# llama-index-core
# torch
@@ -90,31 +90,31 @@ greenlet==3.0.3
# sqlalchemy
h11==0.14.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# httpcore
httpcore==1.0.6
httpcore==1.0.7
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# httpx
httpx==0.27.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# llama-index-core
huggingface-hub[inference]==0.26.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# llama-index-embeddings-huggingface
# sentence-transformers
# tokenizers
# transformers
idna==3.10
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# anyio
# httpx
@@ -122,24 +122,24 @@ idna==3.10
# yarl
jinja2==3.1.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# torch
joblib==1.4.2
# via
# nltk
# scikit-learn
llama-index-core==0.11.23
llama-index-core==0.12.0
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.3.1
llama-index-embeddings-huggingface==0.4.0
# via -r requirements/requirements-help.in
markupsafe==3.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# jinja2
marshmallow==3.23.1
@@ -148,8 +148,8 @@ mpmath==1.3.0
# via sympy
multidict==6.1.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
# yarl
mypy-extensions==1.0.0
@@ -158,16 +158,16 @@ nest-asyncio==1.6.0
# via llama-index-core
networkx==3.2.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# llama-index-core
# torch
nltk==3.9.1
# via llama-index-core
numpy==1.26.4
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# llama-index-core
# scikit-learn
@@ -175,54 +175,54 @@ numpy==1.26.4
# transformers
packaging==24.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# huggingface-hub
# marshmallow
# transformers
pillow==10.4.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# llama-index-core
# sentence-transformers
propcache==0.2.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp
# yarl
pydantic==2.9.2
pydantic==2.10.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# llama-index-core
pydantic-core==2.23.4
pydantic-core==2.27.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# pydantic
pyyaml==6.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# huggingface-hub
# llama-index-core
# transformers
regex==2024.11.6
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# nltk
# tiktoken
# transformers
requests==2.32.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# huggingface-hub
# llama-index-core
@@ -234,16 +234,16 @@ scikit-learn==1.5.2
# via sentence-transformers
scipy==1.13.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# scikit-learn
# sentence-transformers
sentence-transformers==3.3.0
sentence-transformers==3.3.1
# via llama-index-embeddings-huggingface
sniffio==1.3.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# anyio
# httpx
sqlalchemy[asyncio]==2.0.36
@@ -258,20 +258,20 @@ threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.8.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# llama-index-core
tokenizers==0.19.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# transformers
torch==2.2.2
# via sentence-transformers
tqdm==4.67.0
tqdm==4.67.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# huggingface-hub
# llama-index-core
# nltk
@@ -281,8 +281,8 @@ transformers==4.44.2
# via sentence-transformers
typing-extensions==4.12.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# huggingface-hub
# llama-index-core
@@ -297,16 +297,16 @@ typing-inspect==0.9.0
# llama-index-core
urllib3==2.2.3
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-dev.txt
# requests
wrapt==1.16.0
wrapt==1.17.0
# via
# deprecated
# llama-index-core
yarl==1.17.1
yarl==1.18.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# aiohttp

View File

@@ -15,8 +15,8 @@ pyee==12.0.0
# via playwright
typing-extensions==4.12.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/../requirements.txt
# -c requirements/requirements-browser.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt

View File

@@ -15,6 +15,6 @@ RUN bundle install --retry 5 --jobs 20
ENTRYPOINT [ "docker-entrypoint.sh" ]
# bundle exec jekyll serve --force_polling -H 0.0.0.0 -P 4000
CMD [ "bundle", "exec", "jekyll", "serve", "--force_polling", "-H", "0.0.0.0", "-P", "4000" ]
CMD [ "bundle", "exec", "jekyll", "serve", "--verbose", "--trace", "--force_polling", "-H", "0.0.0.0", "-P", "4000" ]

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
from collections import defaultdict
@@ -143,8 +144,31 @@ def main():
return
if args.all_since:
results = process_all_tags_since(args.start_tag)
yaml_output = yaml.dump(results, sort_keys=True)
new_results = process_all_tags_since(args.start_tag)
# If output file exists, read and update it
existing_results = []
if args.output and os.path.exists(args.output):
with open(args.output, "r") as f:
existing_results = yaml.safe_load(f) or []
# Create a map of start_tag->end_tag to result for existing entries
existing_map = {(r["start_tag"], r["end_tag"]): i for i, r in enumerate(existing_results)}
# Update or append new results
for new_result in new_results:
key = (new_result["start_tag"], new_result["end_tag"])
if key in existing_map:
# Replace existing entry
existing_results[existing_map[key]] = new_result
else:
# Append new entry
existing_results.append(new_result)
# Sort results by start_tag
existing_results.sort(key=lambda x: semver.Version.parse(x["start_tag"][1:]))
yaml_output = yaml.dump(existing_results, sort_keys=True)
else:
all_file_counts, grand_total, total_lines, aider_total, aider_percentage, end_date = blame(
args.start_tag, args.end_tag

View File

@@ -22,7 +22,9 @@ def has_been_reopened(issue_number):
# Load environment variables from .env file
load_dotenv()
BOT_SUFFIX = """Note: A [bot script](https://github.com/Aider-AI/aider/blob/main/scripts/issues.py) made these updates to the issue.
BOT_SUFFIX = """
Note: [A bot script](https://github.com/Aider-AI/aider/blob/main/scripts/issues.py) made these updates to the issue.
""" # noqa
DUPLICATE_COMMENT = (

View File

@@ -24,6 +24,7 @@ cog $ARG \
aider/website/docs/config/options.md \
aider/website/docs/config/aider_conf.md \
aider/website/docs/config/adv-model-settings.md \
aider/website/docs/config/model-aliases.md \
aider/website/docs/leaderboards/index.md \
aider/website/docs/llms/other.md \
aider/website/docs/more/infinite-output.md \

125
scripts/update-history.py Executable file
View File

@@ -0,0 +1,125 @@
#!/usr/bin/env python3
import os
import re
import subprocess
import tempfile
from aider import __version__
def get_base_version():
# Parse current version like "0.64.2.dev" to get major.minor
match = re.match(r"(\d+\.\d+)", __version__)
if not match:
raise ValueError(f"Could not parse version: {__version__}")
return match.group(1) + ".0"
def run_git_log():
base_ver = get_base_version()
cmd = [
"git",
"log",
"-p",
f"v{base_ver}..HEAD",
"--",
"aider/",
":!aider/website/",
":!scripts/",
":!HISTORY.md",
]
result = subprocess.run(cmd, capture_output=True, text=True)
return result.stdout
def main():
# Get the git log output
diff_content = run_git_log()
# Extract relevant portion of HISTORY.md
base_ver = get_base_version()
with open("HISTORY.md", "r") as f:
history_content = f.read()
# Find the section for this version
version_header = f"### Aider v{base_ver}"
start_idx = history_content.find("# Release history")
if start_idx == -1:
raise ValueError("Could not find start of release history")
# Find where this version's section ends
version_idx = history_content.find(version_header, start_idx)
if version_idx == -1:
raise ValueError(f"Could not find version header: {version_header}")
# Find the next version header after this one
next_version_idx = history_content.find("\n### Aider v", version_idx + len(version_header))
if next_version_idx == -1:
# No next version found, use the rest of the file
relevant_history = history_content[start_idx:]
else:
# Extract just up to the next version
relevant_history = history_content[start_idx:next_version_idx]
# Save relevant portions to temporary files
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".diff") as tmp_diff:
tmp_diff.write(diff_content)
diff_path = tmp_diff.name
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md") as tmp_hist:
tmp_hist.write(relevant_history)
hist_path = tmp_hist.name
# Run blame to get aider percentage
blame_result = subprocess.run(["python3", "scripts/blame.py"], capture_output=True, text=True)
aider_line = blame_result.stdout.strip().split("\n")[-1] # Get last line with percentage
# Construct and run the aider command
message = f"""
Update the history with changes shown in the diffs.
Describe actual user-facing changes, not every single commit that was made implementing them.
Don't edit or duplicate changes that have existing history entries, just add any new items not already listed.
Be sure to attribute changes to the proper .x version.
Changes in the .x-dev version should be listed under a "### main branch" heading
Also, add this as the last bullet under the "### main branch" section:
{aider_line}
""" # noqa
cmd = ["aider", hist_path, "--read", diff_path, "--msg", message, "--no-auto-commit"]
subprocess.run(cmd)
# Read back the updated history
with open(hist_path, "r") as f:
updated_history = f.read()
# Find where the next version section would start
if next_version_idx == -1:
# No next version found, use the rest of the file
full_history = history_content[:start_idx] + updated_history
else:
# Splice the updated portion back in between the unchanged parts
full_history = (
history_content[:start_idx]
+ updated_history # Keep unchanged header
+ history_content[next_version_idx:] # Add updated portion # Keep older entries
)
# Write back the full history
with open("HISTORY.md", "w") as f:
f.write(full_history)
# Run update-docs.sh after aider
subprocess.run(["scripts/update-docs.sh"])
# Cleanup
os.unlink(diff_path)
os.unlink(hist_path)
# Show git diff of HISTORY.md
subprocess.run(["git", "diff", "HISTORY.md"])
if __name__ == "__main__":
main()

View File

@@ -7,6 +7,7 @@ from unittest.mock import MagicMock, patch
import git
from aider.coders import Coder
from aider.coders.base_coder import UnknownEditFormat
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.models import Model
@@ -168,6 +169,37 @@ class TestCoder(unittest.TestCase):
self.assertEqual(coder.abs_fnames, set([str(fname.resolve())]))
def test_skip_duplicate_basename_mentions(self):
with GitTemporaryDirectory():
io = InputOutput(pretty=False, yes=True)
coder = Coder.create(self.GPT35, None, io)
# Create files with same basename in different directories
fname1 = Path("dir1") / "file.txt"
fname2 = Path("dir2") / "file.txt"
fname3 = Path("dir3") / "unique.txt"
for fname in [fname1, fname2, fname3]:
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
# Add one file to chat
coder.add_rel_fname(str(fname1))
# Mock get_tracked_files to return all files
mock = MagicMock()
mock.return_value = set([str(fname1), str(fname2), str(fname3)])
coder.repo.get_tracked_files = mock
# Check that file mentions skip files with duplicate basenames
mentioned = coder.get_file_mentions(f"Check {fname2} and {fname3}")
self.assertEqual(mentioned, {str(fname3)})
# Add a read-only file with same basename
coder.abs_read_only_fnames.add(str(fname2.resolve()))
mentioned = coder.get_file_mentions(f"Check {fname1} and {fname3}")
self.assertEqual(mentioned, {str(fname3)})
def test_check_for_file_mentions_read_only(self):
with GitTemporaryDirectory():
io = InputOutput(
@@ -739,7 +771,7 @@ two
# Test case with no URL
no_url_input = "This text contains no URL"
result = coder.check_for_urls(no_url_input)
self.assertEqual(result, [])
self.assertEqual(result, no_url_input)
# Test case with the same URL appearing multiple times
repeated_url_input = (
@@ -747,7 +779,8 @@ two
" more time"
)
result = coder.check_for_urls(repeated_url_input)
self.assertEqual(result.count("https://example.com"), 1)
# the original 3 in the input text, plus 1 more for the scraped text
self.assertEqual(result.count("https://example.com"), 4)
self.assertIn("https://example.com", result)
def test_coder_from_coder_with_subdir(self):
@@ -821,32 +854,55 @@ This command will print 'Hello, World!' to the console."""
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io, suggest_shell_commands=False)
self.assertFalse(coder.suggest_shell_commands)
def mock_send(*args, **kwargs):
coder.partial_response_content = """Here's a shell command to run:
def test_detect_urls_enabled(self):
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io, detect_urls=True)
coder.commands.scraper = MagicMock()
coder.commands.scraper.scrape = MagicMock(return_value="some content")
```bash
echo "Hello, World!"
```
# Test with a message containing a URL
message = "Check out https://example.com"
coder.check_for_urls(message)
coder.commands.scraper.scrape.assert_called_once_with("https://example.com")
This command will print 'Hello, World!' to the console."""
coder.partial_response_function_call = dict()
return []
def test_detect_urls_disabled(self):
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io, detect_urls=False)
coder.commands.scraper = MagicMock()
coder.commands.scraper.scrape = MagicMock(return_value="some content")
coder.send = mock_send
# Test with a message containing a URL
message = "Check out https://example.com"
result = coder.check_for_urls(message)
self.assertEqual(result, message)
coder.commands.scraper.scrape.assert_not_called()
# Mock the handle_shell_commands method to check if it's called
coder.handle_shell_commands = MagicMock()
def test_unknown_edit_format_exception(self):
# Test the exception message format
invalid_format = "invalid_format"
valid_formats = ["diff", "whole", "map"]
exc = UnknownEditFormat(invalid_format, valid_formats)
expected_msg = (
f"Unknown edit format {invalid_format}. Valid formats are: {', '.join(valid_formats)}"
)
self.assertEqual(str(exc), expected_msg)
# Run the coder with a message
coder.run(with_message="Suggest a shell command")
def test_unknown_edit_format_creation(self):
# Test that creating a Coder with invalid edit format raises the exception
io = InputOutput(yes=True)
invalid_format = "invalid_format"
# Check if the shell command was added to the list
self.assertEqual(len(coder.shell_commands), 1)
self.assertEqual(coder.shell_commands[0].strip(), 'echo "Hello, World!"')
with self.assertRaises(UnknownEditFormat) as cm:
Coder.create(self.GPT35, invalid_format, io=io)
# Check if handle_shell_commands was called with the correct argument
coder.handle_shell_commands.assert_not_called()
exc = cm.exception
self.assertEqual(exc.edit_format, invalid_format)
self.assertIsInstance(exc.valid_formats, list)
self.assertTrue(len(exc.valid_formats) > 0)
def test_coder_create_with_new_file_oserror(self):
with GitTemporaryDirectory():

View File

@@ -236,7 +236,7 @@ class TestCommands(TestCase):
self.assertIn(str(Path("test_dir/test_file2.txt").resolve()), coder.abs_fnames)
self.assertIn(str(Path("test_dir/another_dir/test_file.txt").resolve()), coder.abs_fnames)
commands.cmd_drop("test_dir/another_dir")
commands.cmd_drop(str(Path("test_dir/another_dir")))
self.assertIn(str(Path("test_dir/test_file1.txt").resolve()), coder.abs_fnames)
self.assertIn(str(Path("test_dir/test_file2.txt").resolve()), coder.abs_fnames)
self.assertNotIn(
@@ -272,6 +272,7 @@ class TestCommands(TestCase):
coder = Coder.create(self.GPT35, None, io)
commands = Commands(io, coder)
# Create test files in root and subdirectory
subdir = Path("subdir")
subdir.mkdir()
(subdir / "subtest1.py").touch()
@@ -279,17 +280,50 @@ class TestCommands(TestCase):
Path("test1.py").touch()
Path("test2.py").touch()
Path("test3.txt").touch()
# Add some files to the chat session
# Add all Python files to the chat session
commands.cmd_add("*.py")
initial_count = len(coder.abs_fnames)
self.assertEqual(initial_count, 2) # Only root .py files should be added
self.assertEqual(len(coder.abs_fnames), 2)
# Call the cmd_drop method with a glob pattern
# Test dropping with glob pattern
commands.cmd_drop("*2.py")
self.assertIn(str(Path("test1.py").resolve()), coder.abs_fnames)
self.assertNotIn(str(Path("test2.py").resolve()), coder.abs_fnames)
self.assertEqual(len(coder.abs_fnames), initial_count - 1)
def test_cmd_drop_without_glob(self):
# Initialize the Commands and InputOutput objects
io = InputOutput(pretty=False, fancy_input=False, yes=True)
from aider.coders import Coder
coder = Coder.create(self.GPT35, None, io)
commands = Commands(io, coder)
# Create test files
test_files = ["file1.txt", "file2.txt", "file3.py"]
for fname in test_files:
Path(fname).touch()
# Add all files to the chat session
for fname in test_files:
commands.cmd_add(fname)
initial_count = len(coder.abs_fnames)
self.assertEqual(initial_count, 3)
# Test dropping individual files without glob
commands.cmd_drop("file1.txt")
self.assertNotIn(str(Path("file1.txt").resolve()), coder.abs_fnames)
self.assertIn(str(Path("file2.txt").resolve()), coder.abs_fnames)
self.assertEqual(len(coder.abs_fnames), initial_count - 1)
# Test dropping multiple files without glob
commands.cmd_drop("file2.txt file3.py")
self.assertNotIn(str(Path("file2.txt").resolve()), coder.abs_fnames)
self.assertNotIn(str(Path("file3.py").resolve()), coder.abs_fnames)
self.assertEqual(len(coder.abs_fnames), 0)
def test_cmd_add_bad_encoding(self):
# Initialize the Commands and InputOutput objects
@@ -903,6 +937,23 @@ class TestCommands(TestCase):
)
)
# Add a dummy message to ensure format_messages() works
vision_coder.cur_messages = [{"role": "user", "content": "Check the image"}]
# Check that the image file appears in the messages
messages = vision_coder.format_messages().all_messages()
found_image = False
for msg in messages:
if msg.get("role") == "user" and "content" in msg:
content = msg["content"]
if isinstance(content, list):
for item in content:
if isinstance(item, dict) and item.get("type") == "text":
if "test_image.jpg" in item.get("text", ""):
found_image = True
break
self.assertTrue(found_image, "Image file not found in messages to LLM")
def test_cmd_read_only_with_glob_pattern(self):
with GitTemporaryDirectory() as repo_dir:
io = InputOutput(pretty=False, fancy_input=False, yes=False)
@@ -1380,6 +1431,43 @@ class TestCommands(TestCase):
finally:
os.unlink(external_file_path)
def test_cmd_drop_read_only_with_relative_path(self):
with ChdirTemporaryDirectory() as repo_dir:
test_file = Path("test_file.txt")
test_file.write_text("Test content")
# Create a test file in a subdirectory
subdir = Path(repo_dir) / "subdir"
subdir.mkdir()
os.chdir(subdir)
io = InputOutput(pretty=False, fancy_input=False, yes=False)
coder = Coder.create(self.GPT35, None, io)
commands = Commands(io, coder)
# Add the file as read-only using absolute path
rel_path = str(Path("..") / "test_file.txt")
commands.cmd_read_only(rel_path)
self.assertEqual(len(coder.abs_read_only_fnames), 1)
# Try to drop using relative path from different working directories
commands.cmd_drop("test_file.txt")
self.assertEqual(len(coder.abs_read_only_fnames), 0)
# Add it again
commands.cmd_read_only(rel_path)
self.assertEqual(len(coder.abs_read_only_fnames), 1)
commands.cmd_drop(rel_path)
self.assertEqual(len(coder.abs_read_only_fnames), 0)
# Add it one more time
commands.cmd_read_only(rel_path)
self.assertEqual(len(coder.abs_read_only_fnames), 1)
commands.cmd_drop("test_file.txt")
self.assertEqual(len(coder.abs_read_only_fnames), 0)
def test_cmd_read_only_with_multiple_files(self):
with GitTemporaryDirectory() as repo_dir:
io = InputOutput(pretty=False, fancy_input=False, yes=False)

View File

@@ -99,6 +99,7 @@ def test_pipe_editor():
patch("aider.editor.write_temp_file") as mock_write,
patch("builtins.open") as mock_open,
patch("os.remove") as mock_remove,
patch("subprocess.call") as mock_subprocess,
):
# Setup mocks
mock_write.return_value = "temp.txt"
@@ -106,22 +107,21 @@ def test_pipe_editor():
mock_file.__enter__.return_value.read.return_value = modified_content
mock_open.return_value = mock_file
with patch("subprocess.call") as mock_subprocess:
# Test with default editor
result = pipe_editor(test_content)
assert result == modified_content
mock_write.assert_called_with(test_content, None)
mock_subprocess.assert_called()
# Test with default editor
result = pipe_editor(test_content)
assert result == modified_content
mock_write.assert_called_with(test_content, None)
mock_subprocess.assert_called()
# Test with custom editor
result = pipe_editor(test_content, editor="code")
assert result == modified_content
mock_subprocess.assert_called()
# Test with custom editor
result = pipe_editor(test_content, editor="code")
assert result == modified_content
mock_subprocess.assert_called()
# Test with suffix
result = pipe_editor(test_content, suffix="md")
assert result == modified_content
mock_write.assert_called_with(test_content, "md")
# Test with suffix
result = pipe_editor(test_content, suffix="md")
assert result == modified_content
mock_write.assert_called_with(test_content, "md")
# Test cleanup on permission error
mock_remove.side_effect = PermissionError

View File

@@ -637,6 +637,53 @@ class TestMain(TestCase):
)
self.assertTrue(coder.suggest_shell_commands)
def test_detect_urls_default(self):
with GitTemporaryDirectory():
coder = main(
["--exit", "--yes"],
input=DummyInput(),
output=DummyOutput(),
return_coder=True,
)
self.assertTrue(coder.detect_urls)
def test_detect_urls_disabled(self):
with GitTemporaryDirectory():
coder = main(
["--no-detect-urls", "--exit", "--yes"],
input=DummyInput(),
output=DummyOutput(),
return_coder=True,
)
self.assertFalse(coder.detect_urls)
def test_detect_urls_enabled(self):
with GitTemporaryDirectory():
coder = main(
["--detect-urls", "--exit", "--yes"],
input=DummyInput(),
output=DummyOutput(),
return_coder=True,
)
self.assertTrue(coder.detect_urls)
def test_pytest_env_vars(self):
# Verify that environment variables from pytest.ini are properly set
self.assertEqual(os.environ.get("AIDER_ANALYTICS"), "false")
def test_invalid_edit_format(self):
with GitTemporaryDirectory():
with patch("aider.io.InputOutput.offer_url") as mock_offer_url:
result = main(
["--edit-format", "not-a-real-format", "--exit", "--yes"],
input=DummyInput(),
output=DummyOutput(),
)
self.assertEqual(result, 1) # main() should return 1 on error
mock_offer_url.assert_called_once()
args, _ = mock_offer_url.call_args
self.assertEqual(args[0], "https://aider.chat/docs/more/edit-formats.html")
def test_chat_language_spanish(self):
with GitTemporaryDirectory():
coder = main(

View File

@@ -92,6 +92,36 @@ class TestModels(unittest.TestCase):
any("bogus-model" in msg for msg in warning_messages)
) # Check that one of the warnings mentions the bogus model
def test_model_aliases(self):
# Test common aliases
model = Model("4")
self.assertEqual(model.name, "gpt-4-0613")
model = Model("4o")
self.assertEqual(model.name, "gpt-4o-2024-08-06")
model = Model("35turbo")
self.assertEqual(model.name, "gpt-3.5-turbo")
model = Model("35-turbo")
self.assertEqual(model.name, "gpt-3.5-turbo")
model = Model("3")
self.assertEqual(model.name, "gpt-3.5-turbo")
model = Model("sonnet")
self.assertEqual(model.name, "claude-3-5-sonnet-20241022")
model = Model("haiku")
self.assertEqual(model.name, "claude-3-5-haiku-20241022")
model = Model("opus")
self.assertEqual(model.name, "claude-3-opus-20240229")
# Test non-alias passes through unchanged
model = Model("gpt-4")
self.assertEqual(model.name, "gpt-4")
def test_aider_extra_model_settings(self):
import tempfile

View File

@@ -278,94 +278,6 @@ class TestRepoMapTypescript(unittest.TestCase):
def setUp(self):
self.GPT35 = Model("gpt-3.5-turbo")
def test_get_repo_map_typescript(self):
# Create a temporary directory with a sample TypeScript file
test_file_ts = "test_file.ts"
file_content_ts = """\
interface IMyInterface {
someMethod(): void;
}
type ExampleType = {
key: string;
value: number;
};
enum Status {
New,
InProgress,
Completed,
}
export class MyClass {
constructor(public value: number) {}
add(input: number): number {
return this.value + input;
return this.value + input;
}
}
export function myFunction(input: number): number {
return input * 2;
}
"""
with IgnorantTemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, test_file_ts), "w") as f:
f.write(file_content_ts)
io = InputOutput()
repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io)
other_files = [os.path.join(temp_dir, test_file_ts)]
result = repo_map.get_repo_map([], other_files)
# Check if the result contains the expected tags map with TypeScript identifiers
self.assertIn("test_file.ts", result)
self.assertIn("IMyInterface", result)
self.assertIn("ExampleType", result)
self.assertIn("Status", result)
self.assertIn("MyClass", result)
self.assertIn("add", result)
self.assertIn("myFunction", result)
# close the open cache files, so Windows won't error
del repo_map
def test_get_repo_map_tsx(self):
# Create a temporary directory with a sample TSX file
test_file_tsx = "test_file.tsx"
file_content_tsx = """\
import React from 'react';
interface GreetingProps {
name: string;
}
const Greeting: React.FC<GreetingProps> = ({ name }) => {
return <h1>Hello, {name}!</h1>;
};
export default Greeting;
"""
with IgnorantTemporaryDirectory() as temp_dir:
with open(os.path.join(temp_dir, test_file_tsx), "w") as f:
f.write(file_content_tsx)
io = InputOutput()
repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io)
other_files = [os.path.join(temp_dir, test_file_tsx)]
result = repo_map.get_repo_map([], other_files)
# Check if the result contains the expected tags map with TSX identifiers
self.assertIn("test_file.tsx", result)
self.assertIn("GreetingProps", result)
self.assertIn("Greeting", result)
# close the open cache files, so Windows won't error
del repo_map
class TestRepoMapAllLanguages(unittest.TestCase):
def setUp(self):
@@ -373,101 +285,69 @@ class TestRepoMapAllLanguages(unittest.TestCase):
def test_get_repo_map_all_languages(self):
language_files = {
"c": (
"test.c",
(
'#include <stdio.h>\n\nint main() {\n printf("Hello, World!\\n");\n '
" return 0;\n}\n"
),
),
"csharp": (
"test.cs",
(
"using System;\n\nclass Program {\n static void Main() {\n "
' Console.WriteLine("Hello, World!");\n }\n}\n'
),
),
"cpp": (
"test.cpp",
(
'#include <iostream>\n\nint main() {\n std::cout << "Hello, World!" <<'
" std::endl;\n return 0;\n}\n"
),
),
"elisp": ("test.el", '(defun greet (name)\n (message "Hello, %s!" name))\n'),
"elixir": (
"test.ex",
(
'defmodule Greeter do\n def hello(name) do\n IO.puts("Hello, #{name}!")\n '
" end\nend\n"
),
),
"elm": (
"test.elm",
(
"module Main exposing (main)\n\nimport Html exposing (text)\n\nmain =\n text"
' "Hello, World!"\n'
),
),
"go": (
"test.go",
(
'package main\n\nimport "fmt"\n\nfunc main() {\n fmt.Println("Hello,'
' World!")\n}\n'
),
),
"java": (
"Test.java",
(
"public class Test {\n public static void main(String[] args) {\n "
' System.out.println("Hello, World!");\n }\n}\n'
),
),
"javascript": (
"test.js",
"function greet(name) {\n console.log(`Hello, ${name}!`);\n}\n",
),
"ocaml": ("test.ml", 'let greet name =\n Printf.printf "Hello, %s!\\n" name\n'),
"php": (
"test.php",
'<?php\nfunction greet($name) {\n echo "Hello, $name!";\n}\n?>\n',
),
"python": ("test.py", 'def greet(name):\n print(f"Hello, {name}!")\n'),
"ql": ("test.ql", 'predicate greet(string name) {\n name = "World"\n}\n'),
"ruby": ("test.rb", 'def greet(name)\n puts "Hello, #{name}!"\nend\n'),
"rust": ("test.rs", 'fn main() {\n println!("Hello, World!");\n}\n'),
"typescript": (
"test.ts",
"function greet(name: string): void {\n console.log(`Hello, ${name}!`);\n}\n",
),
"tsx": (
"test.tsx",
(
"import React from 'react';\n\nconst Greeting: React.FC<{ name: string }> = ({"
" name }) => {\n return <h1>Hello, {name}!</h1>;\n};\n\nexport default"
" Greeting;\n"
),
),
"c": ("c", "main"),
"cpp": ("cpp", "main"),
"elixir": ("ex", "Greeter"),
"java": ("java", "Greeting"),
"javascript": ("js", "Person"),
"ocaml": ("ml", "Greeter"),
"php": ("php", "greet"),
"python": ("py", "Person"),
"ql": ("ql", "greet"),
"ruby": ("rb", "greet"),
"rust": ("rs", "Person"),
"typescript": ("ts", "greet"),
"tsx": ("tsx", "UserProps"),
"csharp": ("cs", "IGreeter"),
"elisp": ("el", "greeter"),
"elm": ("elm", "Person"),
"go": ("go", "Greeter"),
}
with IgnorantTemporaryDirectory() as temp_dir:
for _, (filename, content) in language_files.items():
with open(os.path.join(temp_dir, filename), "w") as f:
fixtures_dir = Path(__file__).parent.parent / "fixtures" / "languages"
for lang, key_symbol in language_files.items():
# Get the fixture file path and name based on language
fixture_dir = fixtures_dir / lang
ext, key_symbol = language_files[lang]
filename = f"test.{ext}"
fixture_path = fixture_dir / filename
self.assertTrue(
fixture_path.exists(), f"Fixture file missing for {lang}: {fixture_path}"
)
# Read the fixture content
with open(fixture_path, "r", encoding="utf-8") as f:
content = f.read()
with GitTemporaryDirectory() as temp_dir:
test_file = os.path.join(temp_dir, filename)
with open(test_file, "w", encoding="utf-8") as f:
f.write(content)
io = InputOutput()
repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io)
other_files = [
os.path.join(temp_dir, filename) for filename, _ in language_files.values()
]
result = repo_map.get_repo_map([], other_files)
io = InputOutput()
repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io)
other_files = [filename]
result = repo_map.get_repo_map([], other_files)
dump(lang)
dump(result)
# Check if the result contains all the expected files
for lang, (filename, _) in language_files.items():
self.assertIn(filename, result, f"File for language {lang} not found in repo map")
self.assertGreater(len(result.strip().splitlines()), 1)
# close the open cache files, so Windows won't error
del repo_map
# Check if the result contains all the expected files and symbols
self.assertIn(
filename, result, f"File for language {lang} not found in repo map: {result}"
)
self.assertIn(
key_symbol,
result,
(
f"Key symbol '{key_symbol}' for language {lang} not found in repo map:"
f" {result}"
),
)
# close the open cache files, so Windows won't error
del repo_map
def test_repo_map_sample_code_base(self):
# Path to the sample code base

103
tests/basic/test_voice.py Normal file
View File

@@ -0,0 +1,103 @@
import os
import queue
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from aider.voice import SoundDeviceError, Voice
# Mock the entire sounddevice module
@pytest.fixture
def mock_sounddevice():
mock_sd = MagicMock()
mock_sd.query_devices.return_value = [
{"name": "test_device", "max_input_channels": 2},
{"name": "another_device", "max_input_channels": 1},
]
with patch.dict("sys.modules", {"sounddevice": mock_sd}):
yield mock_sd
@pytest.fixture
def mock_soundfile():
with patch("aider.voice.sf") as mock_sf:
yield mock_sf
def test_voice_init_default_device(mock_sounddevice):
voice = Voice()
assert voice.device_id is None
assert voice.audio_format == "wav"
assert voice.sd == mock_sounddevice
def test_voice_init_specific_device(mock_sounddevice):
voice = Voice(device_name="test_device")
assert voice.device_id == 0
assert voice.sd == mock_sounddevice
def test_voice_init_invalid_device(mock_sounddevice):
with pytest.raises(ValueError) as exc:
Voice(device_name="nonexistent_device")
assert "Device" in str(exc.value)
assert "not found" in str(exc.value)
def test_voice_init_invalid_format():
    """Unsupported audio formats are rejected at construction time."""
    # sf must be mocked or Voice() raises SoundDeviceError before the format check.
    with patch("aider.voice.sf", MagicMock()):
        with pytest.raises(ValueError) as excinfo:
            Voice(audio_format="invalid")
    assert "Unsupported audio format" in str(excinfo.value)
def test_callback_processing():
    """callback() maps silence to a mid-level meter and loud input to a high one."""
    with patch("aider.voice.sf", MagicMock()):  # sf must exist or Voice() raises
        voice = Voice()
        voice.q = queue.Queue()

        # Silence: amplitude range is below the 0.001 floor, so pct is pinned at 0.5.
        silence = np.zeros((1000, 1))
        voice.callback(silence, None, None, None)
        assert voice.pct == 0.5

        # Full-scale signal: the meter should read close to the top.
        loud = np.ones((1000, 1))
        voice.callback(loud, None, None, None)
        assert voice.pct > 0.9

        # Every callback invocation must enqueue the incoming block.
        assert not voice.q.empty()
def test_get_prompt():
    """get_prompt() renders elapsed time and a volume bar of block characters."""
    with patch("aider.voice.sf", MagicMock()):  # sf must be mocked or Voice() raises
        voice = Voice()
        voice.start_time = os.times().elapsed
        voice.pct = 0.5  # 50% volume level

        prompt = voice.get_prompt()
        assert "Recording" in prompt
        assert "sec" in prompt
        # BUG FIX: the original asserted `"" in prompt`, which is vacuously true for
        # any string and tested nothing. Per the original comments, the intent was
        # to check the meter glyphs, which were lost as garbled Unicode.
        assert "█" in prompt  # filled blocks for the active half of the meter
        assert "░" in prompt  # empty blocks for the remainder
def test_record_and_transcribe_keyboard_interrupt():
    """Ctrl-C during recording is swallowed and reported as no transcript."""
    with patch("aider.voice.sf", MagicMock()):
        voice = Voice()
        interrupt = KeyboardInterrupt()
        with patch.object(voice, "raw_record_and_transcribe", side_effect=interrupt):
            assert voice.record_and_transcribe() is None
def test_record_and_transcribe_device_error():
    """A SoundDeviceError from the recorder is caught and reported as None."""
    with patch("aider.voice.sf", MagicMock()):
        voice = Voice()
        failure = SoundDeviceError("Test error")
        with patch.object(voice, "raw_record_and_transcribe", side_effect=failure):
            assert voice.record_and_transcribe() is None

6
tests/fixtures/languages/c/test.c vendored Normal file
View File

@@ -0,0 +1,6 @@
#include <stdio.h>
int main() {
printf("Hello, World!\n");
return 0;
}

6
tests/fixtures/languages/cpp/test.cpp vendored Normal file
View File

@@ -0,0 +1,6 @@
// C++ fixture: a minimal program that prints a fixed greeting.
#include <iostream>

// Entry point: writes "Hello, World!" to stdout and exits successfully.
int main() {
    std::cout << "Hello, World!" << std::endl;
    return 0;
}

39
tests/fixtures/languages/csharp/test.cs vendored Normal file
View File

@@ -0,0 +1,39 @@
// C# fixture: interface, classes, properties, constants, and an entry point.
using System;
using System.Collections.Generic;

namespace Greetings {
    // Contract for anything that can produce a greeting string.
    public interface IGreeter {
        string Greet(string name);
    }

    // Simple data holder with auto-properties and a constructor.
    public class Person {
        public string Name { get; set; }
        public int Age { get; set; }

        public Person(string name, int age) {
            Name = name;
            Age = age;
        }
    }

    // IGreeter implementation using a formal salutation prefix.
    public class FormalGreeter : IGreeter {
        private const string PREFIX = "Good day";
        private static readonly int MAX_AGE = 150;

        public string Greet(string name) {
            return $"{PREFIX}, {name}!";
        }

        // Greeting that includes the person's name and age.
        public string GreetPerson(Person person) {
            return $"{PREFIX}, {person.Name} ({person.Age})!";
        }
    }

    // Entry point: greets a sample person on the console.
    public class Program {
        static void Main() {
            var greeter = new FormalGreeter();
            var person = new Person("World", 42);
            Console.WriteLine(greeter.GreetPerson(person));
        }
    }
}

25
tests/fixtures/languages/elisp/test.el vendored Normal file
View File

@@ -0,0 +1,25 @@
;; Lisp fixture: variables, a struct, a class, a method, and plain functions.
(defvar *default-greeting* "Hello")
(defvar *max-name-length* 50)

;; Person record with defaulted slots.
(defstruct person
  (name "Anonymous")
  (age 0))

;; Greeter class holding a configurable salutation prefix.
(defclass greeter ()
  ((prefix :initarg :prefix
           :accessor greeter-prefix
           :initform *default-greeting*)))

;; Build the greeting text for person P using G's prefix.
(defmethod greet ((g greeter) (p person))
  (format nil "~A, ~A! You are ~D years old."
          (greeter-prefix g)
          (person-name p)
          (person-age p)))

(defun create-formal-greeter ()
  (make-instance 'greeter :prefix "Good day"))

;; Greet a sample person and show the result.
(defun main ()
  (let ((greeter (create-formal-greeter))
        (person (make-person :name "World" :age 42)))
    (message "%s" (greet greeter person))))

5
tests/fixtures/languages/elixir/test.ex vendored Normal file
View File

@@ -0,0 +1,5 @@
# Elixir fixture: a module with a single greeting function.
defmodule Greeter do
  # Print "Hello, <name>!" to stdout.
  def hello(name) do
    IO.puts("Hello, #{name}!")
  end
end

38
tests/fixtures/languages/elm/test.elm vendored Normal file
View File

@@ -0,0 +1,38 @@
-- Elm fixture: type alias, custom type, functions, and an Html main.
module Main exposing (main, Person, Greeting)

import Html exposing (Html, div, text)
import Html.Attributes exposing (class)


-- Record describing a person.
type alias Person =
    { name : String
    , age : Int
    }


-- Greeting style variants.
type Greeting
    = Formal
    | Casual


-- Build a greeting for a person in the requested style.
greet : Greeting -> Person -> String
greet style person =
    let
        prefix =
            case style of
                Formal ->
                    "Good day"

                Casual ->
                    "Hi"
    in
    prefix ++ ", " ++ person.name ++ "!"


defaultPerson : Person
defaultPerson =
    { name = "World"
    , age = 42
    }


main : Html msg
main =
    div [ class "greeting" ]
        [ text (greet Formal defaultPerson)
        ]

42
tests/fixtures/languages/go/test.go vendored Normal file
View File

@@ -0,0 +1,42 @@
// Go fixture: interface, struct, constants, methods, and a main entry point.
package main

import (
	"fmt"
	// NOTE(review): "strings" is unused, so this file would not pass `go build`;
	// acceptable only if the fixture is parsed rather than compiled — confirm.
	"strings"
)

// Person represents someone who can be greeted
type Person struct {
	Name string
	Age  int
}

// Greeter defines greeting behavior
type Greeter interface {
	Greet(p Person) string
}

// FormalGreeter implements Greeter with formal style
type FormalGreeter struct {
	Prefix string
}

const (
	DefaultName = "World"
	MaxAge      = 150
)

// Greet builds "<Prefix>, <Name>! You are <Age> years old."
func (g FormalGreeter) Greet(p Person) string {
	return fmt.Sprintf("%s, %s! You are %d years old.",
		g.Prefix, p.Name, p.Age)
}

// NewFormalGreeter returns a greeter with the "Good day" prefix.
func NewFormalGreeter() *FormalGreeter {
	return &FormalGreeter{Prefix: "Good day"}
}

func main() {
	greeter := NewFormalGreeter()
	person := Person{Name: DefaultName, Age: 42}
	fmt.Println(greeter.Greet(person))
}

16
tests/fixtures/languages/java/test.java vendored Normal file
View File

@@ -0,0 +1,16 @@
// Java fixture: an interface and an implementing class with a main method.
// NOTE(review): two public top-level types in one file won't compile with
// javac; acceptable only if the fixture is parsed rather than compiled.
public interface Greeting {
    String greet(String name);
}

// Implements Greeting with a fixed "Hello" prefix.
public class Test implements Greeting {
    private String prefix = "Hello";

    // Build "Hello, <name>!".
    public String greet(String name) {
        return prefix + ", " + name + "!";
    }

    public static void main(String[] args) {
        Test greeter = new Test();
        System.out.println(greeter.greet("World"));
    }
}

26
tests/fixtures/languages/javascript/test.js vendored Normal file
View File

@@ -0,0 +1,26 @@
// JavaScript fixture: class, function, constants, and CommonJS exports.

// Class definition
class Person {
    constructor(name) {
        this.name = name;
    }

    // Build "Hello, <name>!".
    sayHello() {
        return `Hello, ${this.name}!`;
    }
}

// Function declaration
function greet(person) {
    return person.sayHello();
}

// Variables and constants
const DEFAULT_NAME = 'World';
let currentPerson = new Person(DEFAULT_NAME);

// Export for use in other modules
module.exports = {
    Person,
    greet,
    DEFAULT_NAME
};

19
tests/fixtures/languages/ocaml/test.ml vendored Normal file
View File

@@ -0,0 +1,19 @@
(* OCaml fixture: a module with a record type and two functions. *)

(* Module definition *)
module Greeter = struct
  type person = {
    name: string;
    age: int
  }

  let create_person name age =
    {name; age}

  (* Print a greeting including the person's name and age. *)
  let greet person =
    Printf.printf "Hello, %s! You are %d years old.\n"
      person.name person.age
end

(* Outside the module *)
let () =
  let person = Greeter.create_person "Alice" 30 in
  Greeter.greet person

5
tests/fixtures/languages/php/test.php vendored Normal file
View File

@@ -0,0 +1,5 @@
<?php
// PHP fixture: a single greeting function.
function greet($name) {
    echo "Hello, $name!";
}
?>

28
tests/fixtures/languages/python/test.py vendored Normal file
View File

@@ -0,0 +1,28 @@
"""Python fixture: a class with methods, a free function, and constants."""

from typing import List, Optional


class Person:
    """A class representing a person."""

    def __init__(self, name: str, age: Optional[int] = None):
        self.name = name
        self.age = age

    def greet(self, formal: bool = False) -> str:
        """Generate a greeting."""
        prefix = "Good day" if formal else "Hello"
        return f"{prefix}, {self.name}!"


def create_greeting_list(people: List[Person]) -> List[str]:
    """Create greetings for a list of people."""
    return [person.greet() for person in people]


# Constants
DEFAULT_NAME = "World"
MAX_AGE = 150

if __name__ == "__main__":
    person = Person(DEFAULT_NAME)
    print(person.greet())

3
tests/fixtures/languages/ql/test.ql vendored Normal file
View File

@@ -0,0 +1,3 @@
// CodeQL fixture: a single predicate definition.
predicate greet(string name) {
  name = "World"
}

3
tests/fixtures/languages/ruby/test.rb vendored Normal file
View File

@@ -0,0 +1,3 @@
# Ruby fixture: a single greeting method.
def greet(name)
  puts "Hello, #{name}!"
end

33
tests/fixtures/languages/rust/test.rs vendored Normal file
View File

@@ -0,0 +1,33 @@
// Rust fixture: trait, struct, impl blocks, constants, and a main function.

// Define a trait
trait Greeting {
    fn greet(&self) -> String;
}

// Define a struct
struct Person {
    name: String,
    age: u32,
}

// Implement the trait for Person
impl Greeting for Person {
    fn greet(&self) -> String {
        format!("Hello, {}! You are {} years old.", self.name, self.age)
    }
}

// Implementation block for Person
impl Person {
    fn new(name: String, age: u32) -> Self {
        Person { name, age }
    }
}

// Constants
const DEFAULT_NAME: &str = "World";
const MAX_AGE: u32 = 150;

fn main() {
    let person = Person::new(DEFAULT_NAME.to_string(), 30);
    println!("{}", person.greet());
}

30
tests/fixtures/languages/tsx/test.tsx vendored Normal file
View File

@@ -0,0 +1,30 @@
// TSX fixture: typed React component, custom hook, and constants.
import React, { useState, useEffect } from 'react';

interface UserProps {
    name: string;
    age?: number;
}

// Component with props interface
const UserGreeting: React.FC<UserProps> = ({ name, age }) => {
    const [greeting, setGreeting] = useState<string>('');

    // Rebuild the greeting whenever name or age changes.
    useEffect(() => {
        setGreeting(`Hello, ${name}${age ? ` (${age})` : ''}!`);
    }, [name, age]);

    return <h1>{greeting}</h1>;
};

// Custom hook
function useCounter(initial: number = 0) {
    const [count, setCount] = useState(initial);
    const increment = () => setCount(c => c + 1);
    return { count, increment };
}

// Constants
const DEFAULT_NAME = 'World';
const MAX_AGE = 150;

export { UserGreeting, useCounter, DEFAULT_NAME, MAX_AGE };

3
tests/fixtures/languages/typescript/test.ts vendored Normal file
View File

@@ -0,0 +1,3 @@
// TypeScript fixture: a single typed function that logs a greeting.
function greet(name: string): void {
    console.log(`Hello, ${name}!`);
}