mirror of
https://github.com/Aider-AI/aider
synced 2026-04-26 01:25:17 +02:00
Compare commits
606 Commits
v0.81.4.de
...
v0.83.1.de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f928ffc3fc | ||
|
|
23cb604e6e | ||
|
|
425fb6d7a8 | ||
|
|
28d87767cd | ||
|
|
ed262b8b06 | ||
|
|
7f30320566 | ||
|
|
9d74e8c730 | ||
|
|
1b2eeaff56 | ||
|
|
0632c7a90f | ||
|
|
c806f18698 | ||
|
|
91d7fbd659 | ||
|
|
fcc85a7ae6 | ||
|
|
dbfba029af | ||
|
|
88fba5f20b | ||
|
|
f7a073961c | ||
|
|
f8c154edce | ||
|
|
c6ad5c8cd2 | ||
|
|
af9ae849bd | ||
|
|
64b4d13880 | ||
|
|
6620141420 | ||
|
|
d79bc2c05b | ||
|
|
9978f6c51e | ||
|
|
5be642fbec | ||
|
|
9f1ef3f49f | ||
|
|
a3562d1d62 | ||
|
|
4e608dbd77 | ||
|
|
3f49acf390 | ||
|
|
77deb35022 | ||
|
|
1a7960810c | ||
|
|
766a41d5de | ||
|
|
df967e4b41 | ||
|
|
781ed90653 | ||
|
|
b9885bb76d | ||
|
|
11480f6110 | ||
|
|
2bc9386876 | ||
|
|
04cbe87caa | ||
|
|
4c959f4542 | ||
|
|
8652fcf86e | ||
|
|
23714d7db6 | ||
|
|
81b86441fd | ||
|
|
edb3bf84cc | ||
|
|
4d5852a30e | ||
|
|
7a5877ea50 | ||
|
|
52ae22bcf8 | ||
|
|
4fb2d78011 | ||
|
|
c93c22ec98 | ||
|
|
a26a3145ba | ||
|
|
055a3d795a | ||
|
|
2d34b738bc | ||
|
|
292aa9bded | ||
|
|
4e86a82a08 | ||
|
|
784ac79da1 | ||
|
|
647f556582 | ||
|
|
aad6838e15 | ||
|
|
95cc362c07 | ||
|
|
9ef506dc25 | ||
|
|
b236e0c801 | ||
|
|
c706663841 | ||
|
|
d7e091f315 | ||
|
|
37601eb4b7 | ||
|
|
a22772b388 | ||
|
|
befff1f22e | ||
|
|
0864a7ca76 | ||
|
|
01592afac3 | ||
|
|
3a5a46253d | ||
|
|
5bb891b2bb | ||
|
|
18f702b95a | ||
|
|
e6a35be5b7 | ||
|
|
6351964bcd | ||
|
|
ede3061fe0 | ||
|
|
f1121e3b7c | ||
|
|
a1cb86dca3 | ||
|
|
cf1d58745e | ||
|
|
98dc8e5d57 | ||
|
|
21a05ead4e | ||
|
|
80f78ee85d | ||
|
|
540b2519c2 | ||
|
|
d3931f67ca | ||
|
|
b6a32d8682 | ||
|
|
023e939798 | ||
|
|
38e7f04e60 | ||
|
|
b40baaceea | ||
|
|
ff549cf9ba | ||
|
|
2c1685bb36 | ||
|
|
2a61494442 | ||
|
|
0af5563e77 | ||
|
|
c147571b18 | ||
|
|
311981f4e5 | ||
|
|
79923c954b | ||
|
|
0b4430f228 | ||
|
|
ee9ad75509 | ||
|
|
920b20b17d | ||
|
|
9297ee982d | ||
|
|
1d5c3c3a2b | ||
|
|
217b45ae88 | ||
|
|
1f6f480864 | ||
|
|
40a5a88d56 | ||
|
|
30097ab859 | ||
|
|
09acfc8147 | ||
|
|
a2ecc5883b | ||
|
|
d127d45669 | ||
|
|
2ebb2103b8 | ||
|
|
c3d4fdb4c1 | ||
|
|
e1ab9cc0ab | ||
|
|
15317a9f4b | ||
|
|
62dc55dd77 | ||
|
|
20faadcbd9 | ||
|
|
8f0fa6684d | ||
|
|
7a3805d39f | ||
|
|
4709a539c6 | ||
|
|
8172125931 | ||
|
|
b8f9d459fb | ||
|
|
96bc57167f | ||
|
|
606e27a337 | ||
|
|
1d7c56b8c5 | ||
|
|
6e1327f66d | ||
|
|
82f33c1220 | ||
|
|
cd7567fcf6 | ||
|
|
e4274aa4f6 | ||
|
|
acd7309b78 | ||
|
|
d5ea078f24 | ||
|
|
8776830306 | ||
|
|
43dd9ef8a5 | ||
|
|
f047b2928b | ||
|
|
d89d500eab | ||
|
|
35fe1df499 | ||
|
|
d32d0b7909 | ||
|
|
0a5c1960b3 | ||
|
|
eef0051b93 | ||
|
|
b5cde63b37 | ||
|
|
043c42b2b4 | ||
|
|
758fa6f67e | ||
|
|
c2fce2699e | ||
|
|
328584e5f4 | ||
|
|
f12395f4d3 | ||
|
|
024c3ed46e | ||
|
|
3ed897c665 | ||
|
|
bfcff84b28 | ||
|
|
4124cee722 | ||
|
|
d18a9f32bc | ||
|
|
aef3863c4a | ||
|
|
f31128706d | ||
|
|
1307215b8f | ||
|
|
cb380b423e | ||
|
|
86d338c811 | ||
|
|
dd3ef07881 | ||
|
|
69f14ace01 | ||
|
|
08220f598c | ||
|
|
9badb711ff | ||
|
|
90b5f897f9 | ||
|
|
4a14aeb7d9 | ||
|
|
fef0f1fa3a | ||
|
|
a39cec8e1d | ||
|
|
c89ac40f56 | ||
|
|
114a0e5ab9 | ||
|
|
371c82e5bb | ||
|
|
71338a679e | ||
|
|
aeaf259021 | ||
|
|
bdec02e290 | ||
|
|
5090f28151 | ||
|
|
a98b531bcc | ||
|
|
8727ffbe68 | ||
|
|
e7de5382fb | ||
|
|
8956eef339 | ||
|
|
0c236d0035 | ||
|
|
aaacee5d4d | ||
|
|
da00455388 | ||
|
|
03acee1ed2 | ||
|
|
4ab8faf21e | ||
|
|
2f45023f59 | ||
|
|
1d2818a064 | ||
|
|
582da0ee44 | ||
|
|
592dea0f8c | ||
|
|
dd8db78680 | ||
|
|
23ce877bd2 | ||
|
|
8bb971c15d | ||
|
|
fe20e528b0 | ||
|
|
8dd8fb52f4 | ||
|
|
af9fcdcfa8 | ||
|
|
9990965e82 | ||
|
|
5b52063446 | ||
|
|
b2e3d47d14 | ||
|
|
67cbda3bd5 | ||
|
|
84d6cf937b | ||
|
|
765ac2a14d | ||
|
|
1167700a53 | ||
|
|
c6954f9972 | ||
|
|
c72e5fcc5e | ||
|
|
4ec075d290 | ||
|
|
60a1a3a8c8 | ||
|
|
bf38754846 | ||
|
|
94197cb25d | ||
|
|
cbaaf96324 | ||
|
|
96899a140b | ||
|
|
c756b080e8 | ||
|
|
a61fb1e23b | ||
|
|
9660d95ceb | ||
|
|
eabc98b64a | ||
|
|
5ff3d1a0c5 | ||
|
|
b6587de389 | ||
|
|
4d9f4e0202 | ||
|
|
e9d2f527a1 | ||
|
|
98e6939c48 | ||
|
|
e3911f8621 | ||
|
|
efd5f79368 | ||
|
|
8e84b5c0b1 | ||
|
|
c1dc473ed8 | ||
|
|
3b08327792 | ||
|
|
8b08c5a5f3 | ||
|
|
eedea62ac1 | ||
|
|
146f62abcc | ||
|
|
1c854f2e83 | ||
|
|
d27bb56cf3 | ||
|
|
28aeb17cbe | ||
|
|
b3cf318c5e | ||
|
|
4acf65fcfb | ||
|
|
4c871c6f50 | ||
|
|
d56ce3ae56 | ||
|
|
5225d7f50c | ||
|
|
41392a1c6e | ||
|
|
ca714157b8 | ||
|
|
9dd2d2a3b1 | ||
|
|
e53f2f7674 | ||
|
|
edbfec0ce4 | ||
|
|
d294e8cd49 | ||
|
|
2229bb9817 | ||
|
|
7ef7b6e042 | ||
|
|
8159cbf7d3 | ||
|
|
c23e609902 | ||
|
|
2d9ea25273 | ||
|
|
7773bbc908 | ||
|
|
72476f0967 | ||
|
|
a9883ccc25 | ||
|
|
3b9b93a8a4 | ||
|
|
f90b7bfb09 | ||
|
|
edc941eb9e | ||
|
|
5e7ef6c50e | ||
|
|
fdc7be1318 | ||
|
|
f00c1bf61b | ||
|
|
09030de0b5 | ||
|
|
bdba0ca1c5 | ||
|
|
e17c7d938c | ||
|
|
433f2908a0 | ||
|
|
9fa5f5ace1 | ||
|
|
849a379a8c | ||
|
|
e205629a94 | ||
|
|
9351f37935 | ||
|
|
7d185bb710 | ||
|
|
07759813ed | ||
|
|
591d294052 | ||
|
|
df1a0c5b8d | ||
|
|
e743394537 | ||
|
|
22f140ac05 | ||
|
|
25a303935c | ||
|
|
3bf20d4f7a | ||
|
|
45413ce815 | ||
|
|
8ffe466257 | ||
|
|
d9aa3cb2d4 | ||
|
|
5251a2452c | ||
|
|
6df2c1595f | ||
|
|
c56e4a08d3 | ||
|
|
80515b69c1 | ||
|
|
303645cffa | ||
|
|
b3d32f65d3 | ||
|
|
7c0aac7454 | ||
|
|
7719eae023 | ||
|
|
5e210c700d | ||
|
|
c6ce871700 | ||
|
|
f28504a2eb | ||
|
|
48733a315b | ||
|
|
16fbff8de1 | ||
|
|
bbab0cea5e | ||
|
|
19de93ae39 | ||
|
|
230e5065c1 | ||
|
|
c94340d493 | ||
|
|
ac1ff231e0 | ||
|
|
5423ffe518 | ||
|
|
ba4d613cbc | ||
|
|
ab11118c8a | ||
|
|
3ca3f39f1d | ||
|
|
8c3f167e8c | ||
|
|
1a4d3927e7 | ||
|
|
20a29e5cd1 | ||
|
|
51e0fff822 | ||
|
|
13b3e75d0e | ||
|
|
de28178369 | ||
|
|
2f38cd184c | ||
|
|
d8caa76bc8 | ||
|
|
506c3c928e | ||
|
|
48ac1de8d3 | ||
|
|
ebfce5b0f2 | ||
|
|
58f4db4e52 | ||
|
|
ba2c4d1eb7 | ||
|
|
6656b5d973 | ||
|
|
b4673fdc85 | ||
|
|
ce1266be68 | ||
|
|
226108d05d | ||
|
|
b2d541f1eb | ||
|
|
758020c574 | ||
|
|
876569613b | ||
|
|
82b26daf37 | ||
|
|
be44b65095 | ||
|
|
8596f0d4a3 | ||
|
|
19a94e5f15 | ||
|
|
7bde345b83 | ||
|
|
d45a5747ea | ||
|
|
e560ab61b6 | ||
|
|
84c3ac93ef | ||
|
|
7a50b7779a | ||
|
|
328a3c3178 | ||
|
|
21fa54d792 | ||
|
|
ec7ac60cfc | ||
|
|
c2d8d5dc82 | ||
|
|
20a7e3552c | ||
|
|
888168f044 | ||
|
|
851642a1bd | ||
|
|
f7bdebfba9 | ||
|
|
a4d3222108 | ||
|
|
f1caab9de0 | ||
|
|
c08336fdb0 | ||
|
|
541b496d09 | ||
|
|
622bf349c5 | ||
|
|
05eaf82b36 | ||
|
|
5c8150fd16 | ||
|
|
ec9327dcb4 | ||
|
|
8e689d35af | ||
|
|
50fd544070 | ||
|
|
4f8bd2e06d | ||
|
|
6f1b6f5f31 | ||
|
|
bdfda399cb | ||
|
|
a08ffc3513 | ||
|
|
21beee2fe1 | ||
|
|
a564f94bf3 | ||
|
|
9e54898866 | ||
|
|
739e01da95 | ||
|
|
3e0af2cc84 | ||
|
|
9ff13740f2 | ||
|
|
00e5c33444 | ||
|
|
57abaf7500 | ||
|
|
ed14be4e70 | ||
|
|
80909e17c7 | ||
|
|
52697ea884 | ||
|
|
9f01c8d0d6 | ||
|
|
e91d7e74ae | ||
|
|
20ca0463ea | ||
|
|
5e40f469bf | ||
|
|
7f28d63c33 | ||
|
|
bb1fa24971 | ||
|
|
ffbbaa06d7 | ||
|
|
14e1b96f05 | ||
|
|
d8c781b66b | ||
|
|
2fbec8545c | ||
|
|
b66901fc75 | ||
|
|
d569bca520 | ||
|
|
efbefc669f | ||
|
|
24805ff85d | ||
|
|
8b917d5716 | ||
|
|
3502f335ec | ||
|
|
758979e4f3 | ||
|
|
8b5fc801da | ||
|
|
f5c4214c93 | ||
|
|
f106993cd1 | ||
|
|
270e84287a | ||
|
|
daec7cf3f4 | ||
|
|
bb42d1e9a5 | ||
|
|
23f182aab3 | ||
|
|
119fbc995c | ||
|
|
3081f49179 | ||
|
|
8cf1874453 | ||
|
|
31b4bd5bcf | ||
|
|
71d1591cc1 | ||
|
|
134a2d60fe | ||
|
|
152b8912ae | ||
|
|
36f23c101d | ||
|
|
0e40510295 | ||
|
|
db0d0768d7 | ||
|
|
c68cade9f2 | ||
|
|
14928727eb | ||
|
|
67b9345929 | ||
|
|
dae1a376a2 | ||
|
|
1e359f1dcf | ||
|
|
1c54857422 | ||
|
|
0f78a0ac5c | ||
|
|
4e1e77890b | ||
|
|
5573cdfba1 | ||
|
|
14028d3758 | ||
|
|
3ab673b398 | ||
|
|
861f51f6c3 | ||
|
|
64f5d0d388 | ||
|
|
9059af8d5f | ||
|
|
c3a543b99d | ||
|
|
c85cd783e5 | ||
|
|
af2d241c99 | ||
|
|
30839a5273 | ||
|
|
8baa99b7ef | ||
|
|
d1e5572343 | ||
|
|
96aa648e17 | ||
|
|
1ae5f23dc8 | ||
|
|
f565f72679 | ||
|
|
78e76648d0 | ||
|
|
8e1e2210dd | ||
|
|
e8c43c36d7 | ||
|
|
97e2a7bae0 | ||
|
|
6b75a578ac | ||
|
|
8b9238ebc9 | ||
|
|
8cc8027b40 | ||
|
|
ffb743e108 | ||
|
|
0f805752d3 | ||
|
|
4e9de4d51b | ||
|
|
a4e9539040 | ||
|
|
0c383dfb11 | ||
|
|
11d2b7ca98 | ||
|
|
e38be2f280 | ||
|
|
febdd3c0d0 | ||
|
|
0b08ca64a8 | ||
|
|
0f8e7fbd34 | ||
|
|
1a080ba71c | ||
|
|
1622531d85 | ||
|
|
7d0a9c7233 | ||
|
|
53a64c88ad | ||
|
|
27b51d51d8 | ||
|
|
bec35e0538 | ||
|
|
f65e6a3bb1 | ||
|
|
fd94f1a5f9 | ||
|
|
09fc037d4d | ||
|
|
cf0e6dac61 | ||
|
|
3b10e3bcb5 | ||
|
|
4c17784444 | ||
|
|
6616f0886d | ||
|
|
dcafab2764 | ||
|
|
3b6146301f | ||
|
|
42e09b3c7f | ||
|
|
73da42bee6 | ||
|
|
415b1cf5f0 | ||
|
|
c011285904 | ||
|
|
4314b4fefb | ||
|
|
d686f6844d | ||
|
|
65a0e5f771 | ||
|
|
5ca6d8ce67 | ||
|
|
688c2b9ee5 | ||
|
|
271f39505c | ||
|
|
3e8367ea3b | ||
|
|
67a1e52259 | ||
|
|
7561687b7b | ||
|
|
93fc7acbe3 | ||
|
|
72dc67950f | ||
|
|
e2bebd1d51 | ||
|
|
03560d3386 | ||
|
|
a3a3303a83 | ||
|
|
232a6f87d2 | ||
|
|
ab71ea0a65 | ||
|
|
1302224f39 | ||
|
|
733bf0dcdf | ||
|
|
4ed48178a9 | ||
|
|
8cffb975d9 | ||
|
|
97b18797a4 | ||
|
|
579794b265 | ||
|
|
bea746595e | ||
|
|
87711b048a | ||
|
|
0b468ebd85 | ||
|
|
aefc250e30 | ||
|
|
4a86fea86b | ||
|
|
fe6e2e1ea7 | ||
|
|
09d90b9b70 | ||
|
|
14eb7b46a2 | ||
|
|
66077fe3a4 | ||
|
|
d50cf806db | ||
|
|
95edae9bd1 | ||
|
|
a6c35305ed | ||
|
|
b382005a4c | ||
|
|
a71b90bdd6 | ||
|
|
d4a68c80bc | ||
|
|
fcf44cbebe | ||
|
|
51d8cb063a | ||
|
|
cdc86565cc | ||
|
|
1c54907b30 | ||
|
|
b6d4246e18 | ||
|
|
cc1a984c7e | ||
|
|
52d39657ab | ||
|
|
363ec82a48 | ||
|
|
f164b0e3eb | ||
|
|
3aaf7a69ec | ||
|
|
6d2828bc3c | ||
|
|
dd6e2051a8 | ||
|
|
ef440972bb | ||
|
|
da96888669 | ||
|
|
75639059e1 | ||
|
|
0a15dd311a | ||
|
|
434a1c6710 | ||
|
|
f961eecab6 | ||
|
|
d33a571f7d | ||
|
|
ea1239efef | ||
|
|
19c7c7a9dc | ||
|
|
49e4af4fab | ||
|
|
3e27c1bb17 | ||
|
|
0f8d196741 | ||
|
|
4c45f0e44b | ||
|
|
e39eef1ed7 | ||
|
|
c9c7aea1c4 | ||
|
|
18ff9eb2b4 | ||
|
|
b2f3d2cd84 | ||
|
|
5e0832cb8b | ||
|
|
a14c0ccac6 | ||
|
|
278f90acdd | ||
|
|
8e8b18e9a9 | ||
|
|
a277d74869 | ||
|
|
7ca3b6455d | ||
|
|
5ec6f69037 | ||
|
|
39962ba5eb | ||
|
|
51fa1f9abd | ||
|
|
47af5d463c | ||
|
|
33f0b0b41c | ||
|
|
48038b1f5e | ||
|
|
323698d387 | ||
|
|
1f702beb74 | ||
|
|
7d34c28af1 | ||
|
|
d26be77010 | ||
|
|
3b96d1bd57 | ||
|
|
48fd0e71d5 | ||
|
|
bcb35ccf44 | ||
|
|
a663ff7fa8 | ||
|
|
813d34a0e9 | ||
|
|
a4074a13c4 | ||
|
|
249f329b07 | ||
|
|
cf160a8f84 | ||
|
|
4db963182d | ||
|
|
199b59fdb9 | ||
|
|
2d09bfa0f3 | ||
|
|
729285e8a2 | ||
|
|
afd17bd96a | ||
|
|
380d8570dc | ||
|
|
e711eaa810 | ||
|
|
7dfdc2094e | ||
|
|
838646ac5b | ||
|
|
507f07575b | ||
|
|
f5e8808770 | ||
|
|
ae5b6e88a5 | ||
|
|
b45186dde0 | ||
|
|
38be8aa0da | ||
|
|
816d4ba206 | ||
|
|
ede59e4d2a | ||
|
|
ce0931a3c8 | ||
|
|
a44e148818 | ||
|
|
71115c6558 | ||
|
|
8ae837e98b | ||
|
|
1d42690824 | ||
|
|
3f94fd5e4e | ||
|
|
165e237be7 | ||
|
|
38dfd6f4f9 | ||
|
|
5851d66174 | ||
|
|
6a970c3515 | ||
|
|
9e91e8f1b2 | ||
|
|
3e1bc77bf2 | ||
|
|
d991cb6721 | ||
|
|
37a252748a | ||
|
|
5664b5b195 | ||
|
|
278a596bfa | ||
|
|
ea74f31b3e | ||
|
|
dd4b61da20 | ||
|
|
c56e836d22 | ||
|
|
427f9c5b00 | ||
|
|
aa07e16f18 | ||
|
|
7b8c7edfd5 | ||
|
|
cf7b35f90d | ||
|
|
02bc9a85c0 | ||
|
|
e1820522db | ||
|
|
0a59c38f31 | ||
|
|
66fdeceb3b | ||
|
|
316d8f8e9b | ||
|
|
15d623f2c0 | ||
|
|
d1437b7666 | ||
|
|
ff8e9850ba | ||
|
|
f648a018a2 | ||
|
|
072bd30443 | ||
|
|
48f89f226f | ||
|
|
d5671c2879 | ||
|
|
80114e7a24 | ||
|
|
dede701423 | ||
|
|
43cb4d68f7 | ||
|
|
4783ad3a73 | ||
|
|
482e0c2d0b | ||
|
|
e951164399 | ||
|
|
c73b987cd0 | ||
|
|
b22c9b8542 | ||
|
|
a5327af5e9 | ||
|
|
192f8bec26 | ||
|
|
eb28e22891 | ||
|
|
b6b8f30378 | ||
|
|
67bb4f9552 | ||
|
|
e980973621 | ||
|
|
b3d9e0d1b0 | ||
|
|
7c3d96d0e7 | ||
|
|
cdd730e627 | ||
|
|
21cca34392 | ||
|
|
d64427d726 | ||
|
|
87ccacb99f | ||
|
|
b37773c630 | ||
|
|
4765a90f97 | ||
|
|
29587cd07c | ||
|
|
2651d99676 | ||
|
|
44e5525e6f | ||
|
|
5e48f6898d | ||
|
|
08d48f42ad | ||
|
|
4600dbcda5 | ||
|
|
c1b2ff20de | ||
|
|
c980fd0e77 |
2
.github/workflows/check_pypi_version.yml
vendored
2
.github/workflows/check_pypi_version.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
|
||||
48
.github/workflows/pre-commit.yml
vendored
Normal file
48
.github/workflows/pre-commit.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
name: pre-commit
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
pre-commit:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
RAW_LOG: pre-commit.log
|
||||
CS_XML: pre-commit.xml
|
||||
steps:
|
||||
- run: sudo apt-get update && sudo apt-get install cppcheck uncrustify
|
||||
if: false
|
||||
- uses: actions/checkout@v4
|
||||
- run: python -m pip install pre-commit
|
||||
- uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Run pre-commit hooks
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
run: |
|
||||
set -o pipefail
|
||||
pre-commit gc
|
||||
pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG}
|
||||
- name: Convert Raw Log to Checkstyle format (launch action)
|
||||
uses: mdeweerd/logToCheckStyle@v2025.1.1
|
||||
if: ${{ failure() }}
|
||||
with:
|
||||
in: ${{ env.RAW_LOG }}
|
||||
# out: ${{ env.CS_XML }}
|
||||
- uses: actions/cache/save@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Provide log as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
name: precommit-logs
|
||||
path: |
|
||||
${{ env.RAW_LOG }}
|
||||
${{ env.CS_XML }}
|
||||
retention-days: 2
|
||||
2
.github/workflows/ubuntu-tests.yml
vendored
2
.github/workflows/ubuntu-tests.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
||||
2
.github/workflows/windows-tests.yml
vendored
2
.github/workflows/windows-tests.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
||||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh # Use PowerShell for all run steps
|
||||
|
||||
75
HISTORY.md
75
HISTORY.md
@@ -2,9 +2,80 @@
|
||||
|
||||
### main branch
|
||||
|
||||
- Added support for `gemini-2.5-pro-preview-05-06` models.
|
||||
- Added support for `qwen3-235b` models.
|
||||
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
|
||||
- Added a spinner animation while waiting for the LLM to start streaming its response.
|
||||
- Updated the spinner animation to a Knight Rider style.
|
||||
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
|
||||
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
|
||||
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
|
||||
- Commit message prompt specifies the user's language.
|
||||
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
|
||||
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
|
||||
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
|
||||
- Added tracking of total tokens sent and received, now included in benchmark statistics.
|
||||
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
|
||||
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
|
||||
- Improved cost calculation using `litellm.completion_cost` where available.
|
||||
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
|
||||
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
|
||||
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
|
||||
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
|
||||
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
|
||||
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
|
||||
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
|
||||
- Improved display of filenames in the prompt header using rich Text formatting.
|
||||
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
|
||||
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
|
||||
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
|
||||
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
|
||||
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
|
||||
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
|
||||
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
|
||||
- Dropped support for Python 3.9.
|
||||
- Aider wrote 55% of the code in this release.
|
||||
|
||||
### Aider v0.82.3
|
||||
|
||||
- Add support for `gemini-2.5-flash-preview-04-17` models.
|
||||
- Improved robustness of edit block parsing when filenames start with backticks or fences.
|
||||
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
|
||||
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
|
||||
- Instruct models to reply in the user's detected system language.
|
||||
- Fix parsing of diffs for newly created files (`--- /dev/null`).
|
||||
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
|
||||
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
|
||||
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Skip scanning files larger than 1MB for AI comments (`--watch`).
|
||||
|
||||
### Aider v0.82.2
|
||||
|
||||
- Fix editing shell files with diff-fenced, by zjy1412.
|
||||
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
|
||||
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
|
||||
|
||||
### Aider v0.82.1
|
||||
|
||||
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
|
||||
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
|
||||
- Disabled streaming for `o3` models since you need identity verification to stream.
|
||||
- Fixed handling of file paths in unified diffs, especially those generated by git.
|
||||
|
||||
### Aider v0.82.0
|
||||
|
||||
- Support for GPT 4.1, mini and nano.
|
||||
- Added new `patch` edit format for OpenAI's GPT-4.1 model.
|
||||
- Improved support for using architect mode with Gemini 2.5 Pro.
|
||||
- Added new `editor-diff`, `editor-whole`, and `editor-diff-fenced` edit formats.
|
||||
- Bugfix for automatically selecting the best edit format to use in architect mode.
|
||||
- Added support for `grok-3-fast-beta` and `grok-3-mini-fast-beta` models.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.81.3
|
||||
|
||||
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
|
||||
- Updated default settings for Grok models.
|
||||
- Aider wrote 64% of the code in this release.
|
||||
|
||||
### Aider v0.81.2
|
||||
|
||||
@@ -16,14 +87,12 @@
|
||||
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
|
||||
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
|
||||
- Commit messages generated by aider are now lowercase, by Anton Ödman.
|
||||
- Aider wrote 64% of the code in this release.
|
||||
|
||||
### Aider v0.81.1
|
||||
|
||||
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
|
||||
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
|
||||
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
|
||||
- Aider wrote 87% of the code in this release.
|
||||
|
||||
### Aider v0.81.0
|
||||
|
||||
|
||||
14
README.md
14
README.md
@@ -27,13 +27,13 @@ cog.out(text)
|
||||
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
|
||||
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
|
||||
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
|
||||
src="https://img.shields.io/badge/📦%20Installs-1.9M-2ecc71?style=flat-square&labelColor=555555"/></a>
|
||||
src="https://img.shields.io/badge/📦%20Installs-2.2M-2ecc71?style=flat-square&labelColor=555555"/></a>
|
||||
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
|
||||
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
|
||||
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
|
||||
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
|
||||
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="Percentage of the new code in Aider's last release written by Aider itself"
|
||||
src="https://img.shields.io/badge/🔄%20Singularity-86%25-e74c3c?style=flat-square&labelColor=555555"/></a>
|
||||
src="https://img.shields.io/badge/🔄%20Singularity-92%25-e74c3c?style=flat-square&labelColor=555555"/></a>
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
@@ -135,11 +135,12 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
|
||||
### Community & Resources
|
||||
- [LLM Leaderboards](https://aider.chat/docs/leaderboards/)
|
||||
- [GitHub Repository](https://github.com/Aider-AI/aider)
|
||||
- [Discord Community](https://discord.gg/Tv2uQnR88V)
|
||||
- [Discord Community](https://discord.gg/Y7X7bhMQFV)
|
||||
- [Blog](https://aider.chat/blog/)
|
||||
|
||||
## Kind Words From Users
|
||||
|
||||
- *"My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world."* — [Eric S. Raymond](https://x.com/esrtweet/status/1910809356381413593)
|
||||
- *"The best free open source AI coding assistant."* — [IndyDevDan](https://youtu.be/YALpX8oOn78)
|
||||
- *"The best AI coding assistant so far."* — [Matthew Berman](https://www.youtube.com/watch?v=df8afeb1FY8)
|
||||
- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS](https://news.ycombinator.com/item?id=36212100)
|
||||
@@ -167,6 +168,11 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
|
||||
- *"Aider is also my best friend."* — [jzn21](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
|
||||
- *"Try Aider, it's worth it."* — [jorgejhms](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
|
||||
- *"I like aider :)"* — [Chenwei Cui](https://x.com/ccui42/status/1904965344999145698)
|
||||
- *"Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
|
||||
- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
|
||||
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
|
||||
- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook](https://x.com/jodavaho/status/1911154899057795218)
|
||||
- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn](https://x.com/anitaheeder/status/1908338609645904160)
|
||||
- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
|
||||
- *"Aider ... is the tool to benchmark against."* — [BeetleB](https://news.ycombinator.com/item?id=43930201)
|
||||
- *"aider is really cool"* — [kache (@yacineMTB)](https://x.com/yacineMTB/status/1911224442430124387)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from packaging import version
|
||||
|
||||
__version__ = "0.81.4.dev"
|
||||
__version__ = "0.83.1.dev"
|
||||
safe_version = __version__
|
||||
|
||||
try:
|
||||
|
||||
@@ -6,6 +6,7 @@ import sys
|
||||
from pathlib import Path
|
||||
|
||||
import configargparse
|
||||
import shtab
|
||||
|
||||
from aider import __version__
|
||||
from aider.args_formatter import (
|
||||
@@ -427,14 +428,20 @@ def get_parser(default_config_files, git_root):
|
||||
group.add_argument(
|
||||
"--attribute-author",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider code changes in the git author name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider code changes in the git author name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-committer",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider commits in the git committer name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider commits in the git committer name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence for aider edits."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-commit-message-author",
|
||||
@@ -448,6 +455,16 @@ def get_parser(default_config_files, git_root):
|
||||
default=False,
|
||||
help="Prefix all commit messages with 'aider: ' (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-co-authored-by",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help=(
|
||||
"Attribute aider edits using the Co-authored-by trailer in the commit message"
|
||||
" (default: False). If True, this takes precedence over default --attribute-author and"
|
||||
" --attribute-committer behavior unless they are explicitly set to True."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--git-commit-verify",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
@@ -670,6 +687,12 @@ def get_parser(default_config_files, git_root):
|
||||
|
||||
######
|
||||
group = parser.add_argument_group("Other settings")
|
||||
group.add_argument(
|
||||
"--disable-playwright",
|
||||
action="store_true",
|
||||
help="Never prompt for or attempt to install Playwright for web scraping (default: False).",
|
||||
default=False,
|
||||
)
|
||||
group.add_argument(
|
||||
"--file",
|
||||
action="append",
|
||||
@@ -788,6 +811,17 @@ def get_parser(default_config_files, git_root):
|
||||
help="Specify which editor to use for the /editor command",
|
||||
)
|
||||
|
||||
supported_shells_list = sorted(list(shtab.SUPPORTED_SHELLS))
|
||||
group.add_argument(
|
||||
"--shell-completions",
|
||||
metavar="SHELL",
|
||||
choices=supported_shells_list,
|
||||
help=(
|
||||
"Print shell completion script for the specified SHELL and exit. Supported shells:"
|
||||
f" {', '.join(supported_shells_list)}. Example: aider --shell-completions bash"
|
||||
),
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Deprecated model settings")
|
||||
# Add deprecated model shortcut arguments
|
||||
@@ -836,13 +870,34 @@ def get_sample_dotenv():
|
||||
|
||||
|
||||
def main():
|
||||
arg = sys.argv[1] if len(sys.argv[1:]) else None
|
||||
|
||||
if arg == "md":
|
||||
print(get_md_help())
|
||||
elif arg == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
if len(sys.argv) > 1:
|
||||
command = sys.argv[1]
|
||||
else:
|
||||
command = "yaml" # Default to yaml if no command is given
|
||||
|
||||
if command == "md":
|
||||
print(get_md_help())
|
||||
elif command == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
elif command == "yaml":
|
||||
print(get_sample_yaml())
|
||||
elif command == "completion":
|
||||
if len(sys.argv) > 2:
|
||||
shell = sys.argv[2]
|
||||
if shell not in shtab.SUPPORTED_SHELLS:
|
||||
print(f"Error: Unsupported shell '{shell}'.", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
parser = get_parser([], None)
|
||||
parser.prog = "aider" # Set the program name on the parser
|
||||
print(shtab.complete(parser, shell=shell))
|
||||
else:
|
||||
print("Error: Please specify a shell for completion.", file=sys.stderr)
|
||||
print(f"Usage: python {sys.argv[0]} completion <shell_name>", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
else:
|
||||
# Default to YAML for any other unrecognized argument, or if 'yaml' was explicitly passed
|
||||
print(get_sample_yaml())
|
||||
|
||||
|
||||
|
||||
@@ -4,10 +4,13 @@ from .base_coder import Coder
|
||||
from .context_coder import ContextCoder
|
||||
from .editblock_coder import EditBlockCoder
|
||||
from .editblock_fenced_coder import EditBlockFencedCoder
|
||||
from .editor_diff_fenced_coder import EditorDiffFencedCoder
|
||||
from .editor_editblock_coder import EditorEditBlockCoder
|
||||
from .editor_whole_coder import EditorWholeFileCoder
|
||||
from .help_coder import HelpCoder
|
||||
from .patch_coder import PatchCoder
|
||||
from .udiff_coder import UnifiedDiffCoder
|
||||
from .udiff_simple import UnifiedDiffSimpleCoder
|
||||
from .wholefile_coder import WholeFileCoder
|
||||
|
||||
# from .single_wholefile_func_coder import SingleWholeFileFunctionCoder
|
||||
@@ -19,10 +22,13 @@ __all__ = [
|
||||
EditBlockCoder,
|
||||
EditBlockFencedCoder,
|
||||
WholeFileCoder,
|
||||
PatchCoder,
|
||||
UnifiedDiffCoder,
|
||||
UnifiedDiffSimpleCoder,
|
||||
# SingleWholeFileFunctionCoder,
|
||||
ArchitectCoder,
|
||||
EditorEditBlockCoder,
|
||||
EditorWholeFileCoder,
|
||||
EditorDiffFencedCoder,
|
||||
ContextCoder,
|
||||
]
|
||||
|
||||
@@ -8,7 +8,8 @@ class AskPrompts(CoderPrompts):
|
||||
Answer questions about the supplied code.
|
||||
Always reply to the user in {language}.
|
||||
|
||||
Describe code changes however you like. Don't use SEARCH/REPLACE blocks!
|
||||
Describe code changes however you like, but elide unchanging code.
|
||||
Don't use SEARCH/REPLACE blocks or return huge swaths of unchanging code.
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
@@ -32,4 +33,4 @@ Here are summaries of some files present in my git repo.
|
||||
If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.
|
||||
"""
|
||||
|
||||
system_reminder = ""
|
||||
system_reminder = "{final_reminders}"
|
||||
|
||||
@@ -15,10 +15,19 @@ import time
|
||||
import traceback
|
||||
from collections import defaultdict
|
||||
from datetime import datetime
|
||||
|
||||
# Optional dependency: used to convert locale codes (eg ``en_US``)
|
||||
# into human-readable language names (eg ``English``).
|
||||
try:
|
||||
from babel import Locale # type: ignore
|
||||
except ImportError: # Babel not installed – we will fall back to a small mapping
|
||||
Locale = None
|
||||
from json.decoder import JSONDecodeError
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from aider import __version__, models, prompts, urls, utils
|
||||
from aider.analytics import Analytics
|
||||
from aider.commands import Commands
|
||||
@@ -38,6 +47,7 @@ from aider.repo import ANY_GIT_ERROR, GitRepo
|
||||
from aider.repomap import RepoMap
|
||||
from aider.run_cmd import run_cmd
|
||||
from aider.utils import format_content, format_messages, format_tokens, is_image_file
|
||||
from aider.waiting import WaitingSpinner
|
||||
|
||||
from ..dump import dump # noqa: F401
|
||||
from .chat_chunks import ChatChunks
|
||||
@@ -101,8 +111,6 @@ class Coder:
|
||||
partial_response_content = ""
|
||||
commit_before_message = []
|
||||
message_cost = 0.0
|
||||
message_tokens_sent = 0
|
||||
message_tokens_received = 0
|
||||
add_cache_headers = False
|
||||
cache_warming_thread = None
|
||||
num_cache_warming_pings = 0
|
||||
@@ -168,6 +176,8 @@ class Coder:
|
||||
commands=from_coder.commands.clone(),
|
||||
total_cost=from_coder.total_cost,
|
||||
ignore_mentions=from_coder.ignore_mentions,
|
||||
total_tokens_sent=from_coder.total_tokens_sent,
|
||||
total_tokens_received=from_coder.total_tokens_received,
|
||||
file_watcher=from_coder.file_watcher,
|
||||
)
|
||||
use_kwargs.update(update) # override to complete the switch
|
||||
@@ -320,6 +330,8 @@ class Coder:
|
||||
chat_language=None,
|
||||
detect_urls=True,
|
||||
ignore_mentions=None,
|
||||
total_tokens_sent=0,
|
||||
total_tokens_received=0,
|
||||
file_watcher=None,
|
||||
auto_copy_context=False,
|
||||
auto_accept_architect=True,
|
||||
@@ -366,6 +378,10 @@ class Coder:
|
||||
self.need_commit_before_edits = set()
|
||||
|
||||
self.total_cost = total_cost
|
||||
self.total_tokens_sent = total_tokens_sent
|
||||
self.total_tokens_received = total_tokens_received
|
||||
self.message_tokens_sent = 0
|
||||
self.message_tokens_received = 0
|
||||
|
||||
self.verbose = verbose
|
||||
self.abs_fnames = set()
|
||||
@@ -564,6 +580,15 @@ class Coder:
|
||||
|
||||
return True
|
||||
|
||||
def _stop_waiting_spinner(self):
|
||||
"""Stop and clear the waiting spinner if it is running."""
|
||||
spinner = getattr(self, "waiting_spinner", None)
|
||||
if spinner:
|
||||
try:
|
||||
spinner.stop()
|
||||
finally:
|
||||
self.waiting_spinner = None
|
||||
|
||||
def get_abs_fnames_content(self):
|
||||
for fname in list(self.abs_fnames):
|
||||
content = self.io.read_text(fname)
|
||||
@@ -953,6 +978,9 @@ class Coder:
|
||||
return inp
|
||||
|
||||
def keyboard_interrupt(self):
|
||||
# Ensure cursor is visible on exit
|
||||
Console().show_cursor(True)
|
||||
|
||||
now = time.time()
|
||||
|
||||
thresh = 2 # seconds
|
||||
@@ -1011,23 +1039,75 @@ class Coder:
|
||||
]
|
||||
self.cur_messages = []
|
||||
|
||||
def get_user_language(self):
|
||||
if self.chat_language:
|
||||
return self.chat_language
|
||||
def normalize_language(self, lang_code):
|
||||
"""
|
||||
Convert a locale code such as ``en_US`` or ``fr`` into a readable
|
||||
language name (e.g. ``English`` or ``French``). If Babel is
|
||||
available it is used for reliable conversion; otherwise a small
|
||||
built-in fallback map handles common languages.
|
||||
"""
|
||||
if not lang_code:
|
||||
return None
|
||||
|
||||
# Probably already a language name
|
||||
if (
|
||||
len(lang_code) > 3
|
||||
and "_" not in lang_code
|
||||
and "-" not in lang_code
|
||||
and lang_code[0].isupper()
|
||||
):
|
||||
return lang_code
|
||||
|
||||
# Preferred: Babel
|
||||
if Locale is not None:
|
||||
try:
|
||||
loc = Locale.parse(lang_code.replace("-", "_"))
|
||||
return loc.get_display_name("en").capitalize()
|
||||
except Exception:
|
||||
pass # Fall back to manual mapping
|
||||
|
||||
# Simple fallback for common languages
|
||||
fallback = {
|
||||
"en": "English",
|
||||
"fr": "French",
|
||||
"es": "Spanish",
|
||||
"de": "German",
|
||||
"it": "Italian",
|
||||
"pt": "Portuguese",
|
||||
"zh": "Chinese",
|
||||
"ja": "Japanese",
|
||||
"ko": "Korean",
|
||||
"ru": "Russian",
|
||||
}
|
||||
return fallback.get(lang_code.split("_")[0].lower(), lang_code)
|
||||
|
||||
def get_user_language(self):
|
||||
"""
|
||||
Detect the user's language preference and return a human-readable
|
||||
language name such as ``English``. Detection order:
|
||||
|
||||
1. ``self.chat_language`` if explicitly set
|
||||
2. ``locale.getlocale()``
|
||||
3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
|
||||
"""
|
||||
# Explicit override
|
||||
if self.chat_language:
|
||||
return self.normalize_language(self.chat_language)
|
||||
|
||||
# System locale
|
||||
try:
|
||||
lang = locale.getlocale()[0]
|
||||
if lang:
|
||||
return lang # Return the full language code, including country
|
||||
return self.normalize_language(lang)
|
||||
except Exception:
|
||||
pass
|
||||
pass # pragma: no cover
|
||||
|
||||
for env_var in ["LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"]:
|
||||
# Environment variables
|
||||
for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
|
||||
lang = os.environ.get(env_var)
|
||||
if lang:
|
||||
return lang.split(".")[
|
||||
0
|
||||
] # Return language and country, but remove encoding if present
|
||||
lang = lang.split(".")[0] # Strip encoding if present
|
||||
return self.normalize_language(lang)
|
||||
|
||||
return None
|
||||
|
||||
@@ -1079,23 +1159,28 @@ class Coder:
|
||||
return platform_text
|
||||
|
||||
def fmt_system_prompt(self, prompt):
|
||||
final_reminders = []
|
||||
if self.main_model.lazy:
|
||||
lazy_prompt = self.gpt_prompts.lazy_prompt
|
||||
elif self.main_model.overeager:
|
||||
lazy_prompt = self.gpt_prompts.overeager_prompt
|
||||
else:
|
||||
lazy_prompt = ""
|
||||
final_reminders.append(self.gpt_prompts.lazy_prompt)
|
||||
if self.main_model.overeager:
|
||||
final_reminders.append(self.gpt_prompts.overeager_prompt)
|
||||
|
||||
user_lang = self.get_user_language()
|
||||
if user_lang:
|
||||
final_reminders.append(f"Reply in {user_lang}.\n")
|
||||
|
||||
platform_text = self.get_platform_info()
|
||||
|
||||
if self.suggest_shell_commands:
|
||||
shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
|
||||
shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
|
||||
rename_with_shell = self.gpt_prompts.rename_with_shell
|
||||
else:
|
||||
shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
|
||||
shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
|
||||
platform=platform_text
|
||||
)
|
||||
rename_with_shell = ""
|
||||
|
||||
if self.chat_language:
|
||||
language = self.chat_language
|
||||
@@ -1109,24 +1194,27 @@ class Coder:
|
||||
else:
|
||||
quad_backtick_reminder = ""
|
||||
|
||||
final_reminders = "\n\n".join(final_reminders)
|
||||
|
||||
prompt = prompt.format(
|
||||
fence=self.fence,
|
||||
quad_backtick_reminder=quad_backtick_reminder,
|
||||
lazy_prompt=lazy_prompt,
|
||||
final_reminders=final_reminders,
|
||||
platform=platform_text,
|
||||
shell_cmd_prompt=shell_cmd_prompt,
|
||||
rename_with_shell=rename_with_shell,
|
||||
shell_cmd_reminder=shell_cmd_reminder,
|
||||
go_ahead_tip=self.gpt_prompts.go_ahead_tip,
|
||||
language=language,
|
||||
)
|
||||
|
||||
if self.main_model.system_prompt_prefix:
|
||||
prompt = self.main_model.system_prompt_prefix + prompt
|
||||
|
||||
return prompt
|
||||
|
||||
def format_chat_chunks(self):
|
||||
self.choose_fence()
|
||||
main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
|
||||
if self.main_model.system_prompt_prefix:
|
||||
main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys
|
||||
|
||||
example_messages = []
|
||||
if self.main_model.examples_as_sys_msg:
|
||||
@@ -1335,8 +1423,13 @@ class Coder:
|
||||
utils.show_messages(messages, functions=self.functions)
|
||||
|
||||
self.multi_response_content = ""
|
||||
if self.show_pretty() and self.stream:
|
||||
self.mdstream = self.io.get_assistant_mdstream()
|
||||
if self.show_pretty():
|
||||
self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
|
||||
self.waiting_spinner.start()
|
||||
if self.stream:
|
||||
self.mdstream = self.io.get_assistant_mdstream()
|
||||
else:
|
||||
self.mdstream = None
|
||||
else:
|
||||
self.mdstream = None
|
||||
|
||||
@@ -1409,6 +1502,9 @@ class Coder:
|
||||
self.live_incremental_response(True)
|
||||
self.mdstream = None
|
||||
|
||||
# Ensure any waiting spinner is stopped
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
self.partial_response_content = self.get_multi_response_content_in_progress(True)
|
||||
self.remove_reasoning_content()
|
||||
self.multi_response_content = ""
|
||||
@@ -1725,6 +1821,9 @@ class Coder:
|
||||
self.io.ai_output(json.dumps(args, indent=4))
|
||||
|
||||
def show_send_output(self, completion):
|
||||
# Stop spinner once we have a response
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
if self.verbose:
|
||||
print(completion)
|
||||
|
||||
@@ -1839,6 +1938,8 @@ class Coder:
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if received_content:
|
||||
self._stop_waiting_spinner()
|
||||
self.partial_response_content += text
|
||||
|
||||
if self.show_pretty():
|
||||
@@ -1918,6 +2019,44 @@ class Coder:
|
||||
self.usage_report = tokens_report
|
||||
return
|
||||
|
||||
try:
|
||||
# Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
|
||||
cost = litellm.completion_cost(completion_response=completion)
|
||||
except Exception:
|
||||
cost = 0
|
||||
|
||||
if not cost:
|
||||
cost = self.compute_costs_from_tokens(
|
||||
prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
)
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
|
||||
def compute_costs_from_tokens(
|
||||
self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
):
|
||||
cost = 0
|
||||
|
||||
input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
|
||||
@@ -1945,35 +2084,15 @@ class Coder:
|
||||
cost += prompt_tokens * input_cost_per_token
|
||||
|
||||
cost += completion_tokens * output_cost_per_token
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
return cost
|
||||
|
||||
def show_usage_report(self):
|
||||
if not self.usage_report:
|
||||
return
|
||||
|
||||
self.total_tokens_sent += self.message_tokens_sent
|
||||
self.total_tokens_received += self.message_tokens_received
|
||||
|
||||
self.io.tool_output(self.usage_report)
|
||||
|
||||
prompt_tokens = self.message_tokens_sent
|
||||
@@ -2248,7 +2367,7 @@ class Coder:
|
||||
context = self.get_context_from_history(self.cur_messages)
|
||||
|
||||
try:
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True)
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
|
||||
if res:
|
||||
self.show_auto_commit_outcome(res)
|
||||
commit_hash, commit_message = res
|
||||
@@ -2284,7 +2403,7 @@ class Coder:
|
||||
if not self.repo:
|
||||
return
|
||||
|
||||
self.repo.commit(fnames=self.need_commit_before_edits)
|
||||
self.repo.commit(fnames=self.need_commit_before_edits, coder=self)
|
||||
|
||||
# files changed, move cur messages back behind the files messages
|
||||
# self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
|
||||
|
||||
@@ -15,7 +15,9 @@ You always COMPLETELY IMPLEMENT the needed code!
|
||||
"""
|
||||
|
||||
overeager_prompt = """Pay careful attention to the scope of the user's request.
|
||||
Do what they ask, but no more."""
|
||||
Do what they ask, but no more.
|
||||
Do not improve, comment, fix or modify unrelated parts of the code in any way!
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
|
||||
@@ -53,3 +55,6 @@ Do not edit these files!
|
||||
shell_cmd_reminder = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
no_shell_cmd_reminder = ""
|
||||
|
||||
rename_with_shell = ""
|
||||
go_ahead_tip = ""
|
||||
|
||||
@@ -412,7 +412,16 @@ def strip_filename(filename, fence):
|
||||
return
|
||||
|
||||
start_fence = fence[0]
|
||||
if filename.startswith(start_fence) or filename.startswith(triple_backticks):
|
||||
if filename.startswith(start_fence):
|
||||
candidate = filename[len(start_fence) :]
|
||||
if candidate and ("." in candidate or "/" in candidate):
|
||||
return candidate
|
||||
return
|
||||
|
||||
if filename.startswith(triple_backticks):
|
||||
candidate = filename[len(triple_backticks) :]
|
||||
if candidate and ("." in candidate or "/" in candidate):
|
||||
return candidate
|
||||
return
|
||||
|
||||
filename = filename.rstrip(":")
|
||||
@@ -454,7 +463,14 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
|
||||
"```csh",
|
||||
"```tcsh",
|
||||
]
|
||||
next_is_editblock = i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip())
|
||||
|
||||
# Check if the next line or the one after that is an editblock
|
||||
next_is_editblock = (
|
||||
i + 1 < len(lines)
|
||||
and head_pattern.match(lines[i + 1].strip())
|
||||
or i + 2 < len(lines)
|
||||
and head_pattern.match(lines[i + 2].strip())
|
||||
)
|
||||
|
||||
if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock:
|
||||
shell_content = []
|
||||
|
||||
@@ -5,5 +5,6 @@ from .editblock_fenced_prompts import EditBlockFencedPrompts
|
||||
|
||||
class EditBlockFencedCoder(EditBlockCoder):
|
||||
"""A coder that uses fenced search/replace blocks for code modifications."""
|
||||
|
||||
edit_format = "diff-fenced"
|
||||
gpt_prompts = EditBlockFencedPrompts()
|
||||
|
||||
@@ -94,7 +94,8 @@ from hello import hello
|
||||
),
|
||||
]
|
||||
|
||||
system_reminder = """# *SEARCH/REPLACE block* Rules:
|
||||
system_reminder = """
|
||||
# *SEARCH/REPLACE block* Rules:
|
||||
|
||||
Every *SEARCH/REPLACE block* must use this format:
|
||||
1. The opening fence and code language, eg: {fence[0]}python
|
||||
@@ -136,7 +137,7 @@ To rename files which have been added to the chat, use shell commands at the end
|
||||
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_reminder}
|
||||
"""
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from . import shell
|
||||
from .base_prompts import CoderPrompts
|
||||
|
||||
|
||||
@@ -7,7 +8,7 @@ class EditBlockPrompts(CoderPrompts):
|
||||
main_system = """Act as an expert software developer.
|
||||
Always use best practices when coding.
|
||||
Respect and use existing conventions, libraries, etc that are already present in the code base.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Take requests for changes to the supplied code.
|
||||
If the request is ambiguous, ask questions.
|
||||
|
||||
@@ -28,32 +29,6 @@ You can keep asking if you then decide you need to edit more files.
|
||||
All changes to files must use this *SEARCH/REPLACE block* format.
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_prompt}
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = """
|
||||
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
|
||||
|
||||
Just suggest shell commands this way, not example code.
|
||||
Only suggest complete shell commands that are ready to execute, without placeholders.
|
||||
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
|
||||
Do not suggest multi-line shell commands.
|
||||
All shell commands will run from the root directory of the user's project.
|
||||
|
||||
Use the appropriate shell based on the user's system info:
|
||||
{platform}
|
||||
Examples of when to suggest shell commands:
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
"""
|
||||
|
||||
no_shell_cmd_prompt = """
|
||||
Keep in mind these details about the user's platform and environment:
|
||||
{platform}
|
||||
"""
|
||||
example_messages = [
|
||||
dict(
|
||||
@@ -181,23 +156,19 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
|
||||
- An empty `SEARCH` section
|
||||
- The new file's contents in the `REPLACE` section
|
||||
|
||||
To rename files which have been added to the chat, use shell commands at the end of your response.
|
||||
|
||||
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
{lazy_prompt}
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_reminder}
|
||||
"""
|
||||
|
||||
shell_cmd_reminder = """
|
||||
Examples of when to suggest shell commands:
|
||||
rename_with_shell = """To rename files which have been added to the chat, use shell commands at the end of your response.
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
"""
|
||||
|
||||
go_ahead_tip = """If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = shell.shell_cmd_prompt
|
||||
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
|
||||
shell_cmd_reminder = shell.shell_cmd_reminder
|
||||
|
||||
9
aider/coders/editor_diff_fenced_coder.py
Normal file
9
aider/coders/editor_diff_fenced_coder.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from .editblock_fenced_coder import EditBlockFencedCoder
|
||||
from .editor_diff_fenced_prompts import EditorDiffFencedPrompts
|
||||
|
||||
|
||||
class EditorDiffFencedCoder(EditBlockFencedCoder):
|
||||
"A coder that uses search/replace blocks, focused purely on editing files."
|
||||
|
||||
edit_format = "editor-diff-fenced"
|
||||
gpt_prompts = EditorDiffFencedPrompts()
|
||||
11
aider/coders/editor_diff_fenced_prompts.py
Normal file
11
aider/coders/editor_diff_fenced_prompts.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from .editblock_fenced_prompts import EditBlockFencedPrompts
|
||||
|
||||
|
||||
class EditorDiffFencedPrompts(EditBlockFencedPrompts):
|
||||
shell_cmd_prompt = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
shell_cmd_reminder = ""
|
||||
go_ahead_tip = ""
|
||||
rename_with_shell = ""
|
||||
@@ -5,7 +5,7 @@ from .editblock_prompts import EditBlockPrompts
|
||||
|
||||
class EditorEditBlockPrompts(EditBlockPrompts):
|
||||
main_system = """Act as an expert software developer who edits source code.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Describe each change with a *SEARCH/REPLACE block* per the examples below.
|
||||
All changes to files must use this *SEARCH/REPLACE block* format.
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
@@ -14,3 +14,5 @@ ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
shell_cmd_prompt = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
shell_cmd_reminder = ""
|
||||
go_ahead_tip = ""
|
||||
rename_with_shell = ""
|
||||
|
||||
@@ -5,6 +5,6 @@ from .wholefile_prompts import WholeFilePrompts
|
||||
|
||||
class EditorWholeFilePrompts(WholeFilePrompts):
|
||||
main_system = """Act as an expert software developer and make changes to source code.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Output a copy of each file that needs changes.
|
||||
"""
|
||||
|
||||
@@ -5,6 +5,7 @@ from .help_prompts import HelpPrompts
|
||||
|
||||
class HelpCoder(Coder):
|
||||
"""Interactive help and documentation about aider."""
|
||||
|
||||
edit_format = "help"
|
||||
gpt_prompts = HelpPrompts()
|
||||
|
||||
|
||||
706
aider/coders/patch_coder.py
Normal file
706
aider/coders/patch_coder.py
Normal file
@@ -0,0 +1,706 @@
|
||||
import pathlib
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
from .base_coder import Coder
|
||||
from .patch_prompts import PatchPrompts
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Domain objects & Exceptions (Adapted from apply_patch.py)
|
||||
# --------------------------------------------------------------------------- #
|
||||
class DiffError(ValueError):
|
||||
"""Any problem detected while parsing or applying a patch."""
|
||||
|
||||
|
||||
class ActionType(str, Enum):
|
||||
ADD = "Add"
|
||||
DELETE = "Delete"
|
||||
UPDATE = "Update"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Chunk:
|
||||
orig_index: int = -1 # Line number in the *original* file block where the change starts
|
||||
del_lines: List[str] = field(default_factory=list)
|
||||
ins_lines: List[str] = field(default_factory=list)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PatchAction:
|
||||
type: ActionType
|
||||
path: str
|
||||
# For ADD:
|
||||
new_content: Optional[str] = None
|
||||
# For UPDATE:
|
||||
chunks: List[Chunk] = field(default_factory=list)
|
||||
move_path: Optional[str] = None
|
||||
|
||||
|
||||
# Type alias for the return type of get_edits
|
||||
EditResult = Tuple[str, PatchAction]
|
||||
|
||||
|
||||
@dataclass
|
||||
class Patch:
|
||||
actions: Dict[str, PatchAction] = field(default_factory=dict)
|
||||
fuzz: int = 0 # Track fuzziness used during parsing
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Helper functions (Adapted from apply_patch.py)
|
||||
# --------------------------------------------------------------------------- #
|
||||
def _norm(line: str) -> str:
|
||||
"""Strip CR so comparisons work for both LF and CRLF input."""
|
||||
return line.rstrip("\r")
|
||||
|
||||
|
||||
def find_context_core(lines: List[str], context: List[str], start: int) -> Tuple[int, int]:
|
||||
"""Finds context block, returns start index and fuzz level."""
|
||||
if not context:
|
||||
return start, 0
|
||||
|
||||
# Exact match
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if lines[i : i + len(context)] == context:
|
||||
return i, 0
|
||||
# Rstrip match
|
||||
norm_context = [s.rstrip() for s in context]
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if [s.rstrip() for s in lines[i : i + len(context)]] == norm_context:
|
||||
return i, 1 # Fuzz level 1
|
||||
# Strip match
|
||||
norm_context_strip = [s.strip() for s in context]
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if [s.strip() for s in lines[i : i + len(context)]] == norm_context_strip:
|
||||
return i, 100 # Fuzz level 100
|
||||
return -1, 0
|
||||
|
||||
|
||||
def find_context(lines: List[str], context: List[str], start: int, eof: bool) -> Tuple[int, int]:
|
||||
"""Finds context, handling EOF marker."""
|
||||
if eof:
|
||||
# If EOF marker, first try matching at the very end
|
||||
if len(lines) >= len(context):
|
||||
new_index, fuzz = find_context_core(lines, context, len(lines) - len(context))
|
||||
if new_index != -1:
|
||||
return new_index, fuzz
|
||||
# If not found at end, search from `start` as fallback
|
||||
new_index, fuzz = find_context_core(lines, context, start)
|
||||
return new_index, fuzz + 10_000 # Add large fuzz penalty if EOF wasn't at end
|
||||
# Normal case: search from `start`
|
||||
return find_context_core(lines, context, start)
|
||||
|
||||
|
||||
def peek_next_section(lines: List[str], index: int) -> Tuple[List[str], List[Chunk], int, bool]:
|
||||
"""
|
||||
Parses one section (context, -, + lines) of an Update block.
|
||||
Returns: (context_lines, chunks_in_section, next_index, is_eof)
|
||||
"""
|
||||
context_lines: List[str] = []
|
||||
del_lines: List[str] = []
|
||||
ins_lines: List[str] = []
|
||||
chunks: List[Chunk] = []
|
||||
mode = "keep" # Start by expecting context lines
|
||||
start_index = index
|
||||
|
||||
while index < len(lines):
|
||||
line = lines[index]
|
||||
norm_line = _norm(line)
|
||||
|
||||
# Check for section terminators
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"@@",
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
"*** End of File", # Special terminator
|
||||
)
|
||||
):
|
||||
break
|
||||
if norm_line == "***": # Legacy/alternative terminator? Handle just in case.
|
||||
break
|
||||
if norm_line.startswith("***"): # Invalid line
|
||||
raise DiffError(f"Invalid patch line found in update section: {line}")
|
||||
|
||||
index += 1
|
||||
last_mode = mode
|
||||
|
||||
# Determine line type and strip prefix
|
||||
if line.startswith("+"):
|
||||
mode = "add"
|
||||
line_content = line[1:]
|
||||
elif line.startswith("-"):
|
||||
mode = "delete"
|
||||
line_content = line[1:]
|
||||
elif line.startswith(" "):
|
||||
mode = "keep"
|
||||
line_content = line[1:]
|
||||
elif line.strip() == "": # Treat blank lines in patch as context ' '
|
||||
mode = "keep"
|
||||
line_content = "" # Keep it as a blank line
|
||||
else:
|
||||
# Assume lines without prefix are context if format is loose,
|
||||
# but strict format requires ' '. Raise error for strictness.
|
||||
raise DiffError(f"Invalid line prefix in update section: {line}")
|
||||
|
||||
# If mode changes from add/delete back to keep, finalize the previous chunk
|
||||
if mode == "keep" and last_mode != "keep":
|
||||
if del_lines or ins_lines:
|
||||
chunks.append(
|
||||
Chunk(
|
||||
# orig_index is relative to the start of the *context* block found
|
||||
orig_index=len(context_lines) - len(del_lines),
|
||||
del_lines=del_lines,
|
||||
ins_lines=ins_lines,
|
||||
)
|
||||
)
|
||||
del_lines, ins_lines = [], []
|
||||
|
||||
# Collect lines based on mode
|
||||
if mode == "delete":
|
||||
del_lines.append(line_content)
|
||||
context_lines.append(line_content) # Deleted lines are part of the original context
|
||||
elif mode == "add":
|
||||
ins_lines.append(line_content)
|
||||
elif mode == "keep":
|
||||
context_lines.append(line_content)
|
||||
|
||||
# Finalize any pending chunk at the end of the section
|
||||
if del_lines or ins_lines:
|
||||
chunks.append(
|
||||
Chunk(
|
||||
orig_index=len(context_lines) - len(del_lines),
|
||||
del_lines=del_lines,
|
||||
ins_lines=ins_lines,
|
||||
)
|
||||
)
|
||||
|
||||
# Check for EOF marker
|
||||
is_eof = False
|
||||
if index < len(lines) and _norm(lines[index]) == "*** End of File":
|
||||
index += 1
|
||||
is_eof = True
|
||||
|
||||
if index == start_index and not is_eof: # Should not happen if patch is well-formed
|
||||
raise DiffError("Empty patch section found.")
|
||||
|
||||
return context_lines, chunks, index, is_eof
|
||||
|
||||
|
||||
def identify_files_needed(text: str) -> List[str]:
|
||||
"""Extracts file paths from Update and Delete actions."""
|
||||
lines = text.splitlines()
|
||||
paths = set()
|
||||
for line in lines:
|
||||
norm_line = _norm(line)
|
||||
if norm_line.startswith("*** Update File: "):
|
||||
paths.add(norm_line[len("*** Update File: ") :].strip())
|
||||
elif norm_line.startswith("*** Delete File: "):
|
||||
paths.add(norm_line[len("*** Delete File: ") :].strip())
|
||||
return list(paths)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# PatchCoder Class Implementation
|
||||
# --------------------------------------------------------------------------- #
|
||||
class PatchCoder(Coder):
    """
    A coder that uses a custom patch format for code modifications,
    inspired by the format described in tmp.gpt41edits.txt.
    Applies patches using logic adapted from the reference apply_patch.py script.
    """

    # Identifier used to select this edit format (e.g. --edit-format patch).
    edit_format = "patch"
    # Prompt templates that instruct the LLM to emit the V4A patch format.
    gpt_prompts = PatchPrompts()
|
||||
|
||||
def get_edits(self) -> List[EditResult]:
|
||||
"""
|
||||
Parses the LLM response content (containing the patch) into a list of
|
||||
tuples, where each tuple contains the file path and the PatchAction object.
|
||||
"""
|
||||
content = self.partial_response_content
|
||||
if not content or not content.strip():
|
||||
return []
|
||||
|
||||
# Check for patch sentinels
|
||||
lines = content.splitlines()
|
||||
if (
|
||||
len(lines) < 2
|
||||
or not _norm(lines[0]).startswith("*** Begin Patch")
|
||||
# Allow flexible end, might be EOF or just end of stream
|
||||
# or _norm(lines[-1]) != "*** End Patch"
|
||||
):
|
||||
# Tolerate missing sentinels if content looks like a patch action
|
||||
is_patch_like = any(
|
||||
_norm(line).startswith(
|
||||
("@@", "*** Update File:", "*** Add File:", "*** Delete File:")
|
||||
)
|
||||
for line in lines
|
||||
)
|
||||
if not is_patch_like:
|
||||
# If it doesn't even look like a patch, return empty
|
||||
self.io.tool_warning("Response does not appear to be in patch format.")
|
||||
return []
|
||||
# If it looks like a patch but lacks sentinels, try parsing anyway but warn.
|
||||
self.io.tool_warning(
|
||||
"Patch format warning: Missing '*** Begin Patch'/'*** End Patch' sentinels."
|
||||
)
|
||||
start_index = 0
|
||||
else:
|
||||
start_index = 1 # Skip "*** Begin Patch"
|
||||
|
||||
# Identify files needed for context lookups during parsing
|
||||
needed_paths = identify_files_needed(content)
|
||||
current_files: Dict[str, str] = {}
|
||||
for rel_path in needed_paths:
|
||||
abs_path = self.abs_root_path(rel_path)
|
||||
try:
|
||||
# Use io.read_text to handle potential errors/encodings
|
||||
file_content = self.io.read_text(abs_path)
|
||||
if file_content is None:
|
||||
raise DiffError(
|
||||
f"File referenced in patch not found or could not be read: {rel_path}"
|
||||
)
|
||||
current_files[rel_path] = file_content
|
||||
except FileNotFoundError:
|
||||
raise DiffError(f"File referenced in patch not found: {rel_path}")
|
||||
except IOError as e:
|
||||
raise DiffError(f"Error reading file {rel_path}: {e}")
|
||||
|
||||
try:
|
||||
# Parse the patch text using adapted logic
|
||||
patch_obj = self._parse_patch_text(lines, start_index, current_files)
|
||||
# Convert Patch object actions dict to a list of tuples (path, action)
|
||||
# for compatibility with the base Coder's prepare_to_edit method.
|
||||
results = []
|
||||
for path, action in patch_obj.actions.items():
|
||||
results.append((path, action))
|
||||
return results
|
||||
except DiffError as e:
|
||||
# Raise as ValueError for consistency with other coders' error handling
|
||||
raise ValueError(f"Error parsing patch content: {e}")
|
||||
except Exception as e:
|
||||
# Catch unexpected errors during parsing
|
||||
raise ValueError(f"Unexpected error parsing patch: {e}")
|
||||
|
||||
    def _parse_patch_text(
        self, lines: List[str], start_index: int, current_files: Dict[str, str]
    ) -> Patch:
        """
        Parses patch content lines into a Patch object.
        Adapted from the Parser class in apply_patch.py.

        :param lines: All lines of the patch text.
        :param start_index: Index of the first line after "*** Begin Patch".
        :param current_files: Mapping of relative path -> current file content
            for every file the patch updates or deletes.
        :raises DiffError: On malformed, conflicting, or unmatchable actions.
        """
        patch = Patch()
        index = start_index
        # Sum of the "fuzz" (inexactness) of all context matches in the patch.
        fuzz_accumulator = 0

        while index < len(lines):
            line = lines[index]
            norm_line = _norm(line)

            if norm_line == "*** End Patch":
                index += 1
                break  # Successfully reached end

            # ---------- UPDATE ---------- #
            if norm_line.startswith("*** Update File: "):
                path = norm_line[len("*** Update File: ") :].strip()
                index += 1
                if not path:
                    raise DiffError("Update File action missing path.")

                # Optional move target
                move_to = None
                if index < len(lines) and _norm(lines[index]).startswith("*** Move to: "):
                    move_to = _norm(lines[index])[len("*** Move to: ") :].strip()
                    index += 1
                    if not move_to:
                        raise DiffError("Move to action missing path.")

                if path not in current_files:
                    raise DiffError(f"Update File Error - missing file content for: {path}")

                file_content = current_files[path]

                existing_action = patch.actions.get(path)
                if existing_action is not None:
                    # Merge additional UPDATE block into the existing one
                    if existing_action.type != ActionType.UPDATE:
                        raise DiffError(f"Conflicting actions for file: {path}")

                    new_action, index, fuzz = self._parse_update_file_sections(
                        lines, index, file_content
                    )
                    existing_action.chunks.extend(new_action.chunks)

                    if move_to:
                        if existing_action.move_path and existing_action.move_path != move_to:
                            raise DiffError(f"Conflicting move targets for file: {path}")
                        existing_action.move_path = move_to
                    fuzz_accumulator += fuzz
                else:
                    # First UPDATE block for this file
                    action, index, fuzz = self._parse_update_file_sections(
                        lines, index, file_content
                    )
                    action.path = path
                    action.move_path = move_to
                    patch.actions[path] = action
                    fuzz_accumulator += fuzz
                continue

            # ---------- DELETE ---------- #
            elif norm_line.startswith("*** Delete File: "):
                path = norm_line[len("*** Delete File: ") :].strip()
                index += 1
                if not path:
                    raise DiffError("Delete File action missing path.")
                existing_action = patch.actions.get(path)
                if existing_action:
                    if existing_action.type == ActionType.DELETE:
                        # Duplicate delete – ignore the extra block
                        self.io.tool_warning(f"Duplicate delete action for file: {path} ignored.")
                        continue
                    else:
                        raise DiffError(f"Conflicting actions for file: {path}")
                if path not in current_files:
                    raise DiffError(
                        f"Delete File Error - file not found: {path}"
                    )  # Check against known files

                patch.actions[path] = PatchAction(type=ActionType.DELETE, path=path)
                continue

            # ---------- ADD ---------- #
            elif norm_line.startswith("*** Add File: "):
                path = norm_line[len("*** Add File: ") :].strip()
                index += 1
                if not path:
                    raise DiffError("Add File action missing path.")
                if path in patch.actions:
                    raise DiffError(f"Duplicate action for file: {path}")
                # Check if file exists in the context provided (should not for Add).
                # Note: We only have needed files, a full check requires FS access.
                # if path in current_files:
                #     raise DiffError(f"Add File Error - file already exists: {path}")

                action, index = self._parse_add_file_content(lines, index)
                action.path = path  # Ensure path is set
                patch.actions[path] = action
                continue

            # If we are here, the line is unexpected
            # Allow blank lines between actions
            if not norm_line.strip():
                index += 1
                continue

            raise DiffError(f"Unknown or misplaced line while parsing patch: {line}")

        # Check if we consumed the whole input or stopped early
        # Tolerate missing "*** End Patch" if we processed actions
        # if index < len(lines) and _norm(lines[index-1]) != "*** End Patch":
        #     raise DiffError("Patch parsing finished unexpectedly before end of input.")

        patch.fuzz = fuzz_accumulator
        return patch
|
||||
|
||||
def _parse_update_file_sections(
|
||||
self, lines: List[str], index: int, file_content: str
|
||||
) -> Tuple[PatchAction, int, int]:
|
||||
"""Parses all sections (@@, context, -, +) for a single Update File action."""
|
||||
action = PatchAction(type=ActionType.UPDATE, path="") # Path set by caller
|
||||
orig_lines = file_content.splitlines() # Use splitlines for consistency
|
||||
current_file_index = 0 # Track position in original file content
|
||||
total_fuzz = 0
|
||||
|
||||
while index < len(lines):
|
||||
norm_line = _norm(lines[index])
|
||||
# Check for terminators for *this* file update
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
)
|
||||
):
|
||||
break # End of this file's update section
|
||||
|
||||
# Handle @@ scope lines (optional)
|
||||
scope_lines = []
|
||||
while index < len(lines) and _norm(lines[index]).startswith("@@"):
|
||||
scope_line_content = lines[index][len("@@") :].strip()
|
||||
if scope_line_content: # Ignore empty @@ lines?
|
||||
scope_lines.append(scope_line_content)
|
||||
index += 1
|
||||
|
||||
# Find the scope in the original file if specified
|
||||
if scope_lines:
|
||||
# Simple scope finding: search from current position
|
||||
# A more robust finder could handle nested scopes like the reference @@ @@
|
||||
found_scope = False
|
||||
temp_index = current_file_index
|
||||
while temp_index < len(orig_lines):
|
||||
# Check if all scope lines match sequentially from temp_index
|
||||
match = True
|
||||
for i, scope in enumerate(scope_lines):
|
||||
if (
|
||||
temp_index + i >= len(orig_lines)
|
||||
or _norm(orig_lines[temp_index + i]).strip() != scope
|
||||
):
|
||||
match = False
|
||||
break
|
||||
if match:
|
||||
current_file_index = temp_index + len(scope_lines)
|
||||
found_scope = True
|
||||
break
|
||||
temp_index += 1
|
||||
|
||||
if not found_scope:
|
||||
# Try fuzzy scope matching (strip whitespace)
|
||||
temp_index = current_file_index
|
||||
while temp_index < len(orig_lines):
|
||||
match = True
|
||||
for i, scope in enumerate(scope_lines):
|
||||
if (
|
||||
temp_index + i >= len(orig_lines)
|
||||
or _norm(orig_lines[temp_index + i]).strip() != scope.strip()
|
||||
):
|
||||
match = False
|
||||
break
|
||||
if match:
|
||||
current_file_index = temp_index + len(scope_lines)
|
||||
found_scope = True
|
||||
total_fuzz += 1 # Add fuzz for scope match difference
|
||||
break
|
||||
temp_index += 1
|
||||
|
||||
if not found_scope:
|
||||
scope_txt = "\n".join(scope_lines)
|
||||
raise DiffError(f"Could not find scope context:\n{scope_txt}")
|
||||
|
||||
# Peek and parse the next context/change section
|
||||
context_block, chunks_in_section, next_index, is_eof = peek_next_section(lines, index)
|
||||
|
||||
# Find where this context block appears in the original file
|
||||
found_index, fuzz = find_context(orig_lines, context_block, current_file_index, is_eof)
|
||||
total_fuzz += fuzz
|
||||
|
||||
if found_index == -1:
|
||||
ctx_txt = "\n".join(context_block)
|
||||
marker = "*** End of File" if is_eof else ""
|
||||
raise DiffError(
|
||||
f"Could not find patch context {marker} starting near line"
|
||||
f" {current_file_index}:\n{ctx_txt}"
|
||||
)
|
||||
|
||||
# Adjust chunk original indices to be absolute within the file
|
||||
for chunk in chunks_in_section:
|
||||
# chunk.orig_index from peek is relative to context_block start
|
||||
# We need it relative to the file start
|
||||
chunk.orig_index += found_index
|
||||
action.chunks.append(chunk)
|
||||
|
||||
# Advance file index past the matched context block
|
||||
current_file_index = found_index + len(context_block)
|
||||
# Advance line index past the processed section in the patch
|
||||
index = next_index
|
||||
|
||||
return action, index, total_fuzz
|
||||
|
||||
def _parse_add_file_content(self, lines: List[str], index: int) -> Tuple[PatchAction, int]:
|
||||
"""Parses the content (+) lines for an Add File action."""
|
||||
added_lines: List[str] = []
|
||||
while index < len(lines):
|
||||
line = lines[index]
|
||||
norm_line = _norm(line)
|
||||
# Stop if we hit another action or end marker
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
)
|
||||
):
|
||||
break
|
||||
|
||||
# Expect lines to start with '+'
|
||||
if not line.startswith("+"):
|
||||
# Tolerate blank lines? Or require '+'? Reference implies '+' required.
|
||||
if norm_line.strip() == "":
|
||||
# Treat blank line as adding a blank line
|
||||
added_lines.append("")
|
||||
else:
|
||||
raise DiffError(f"Invalid Add File line (missing '+'): {line}")
|
||||
else:
|
||||
added_lines.append(line[1:]) # Strip leading '+'
|
||||
|
||||
index += 1
|
||||
|
||||
action = PatchAction(type=ActionType.ADD, path="", new_content="\n".join(added_lines))
|
||||
return action, index
|
||||
|
||||
    def apply_edits(self, edits: List[PatchAction]):
        """
        Applies the parsed PatchActions to the corresponding files.

        :param edits: List of (path, PatchAction) tuples produced by get_edits.
        :raises ValueError: If any action fails to apply (with the underlying
            error message included).
        """
        if not edits:
            return

        # Edits are now List[Tuple[str, PatchAction]]
        for _path_tuple_element, action in edits:
            # action is the PatchAction object
            # action.path is the canonical path within the action logic
            full_path = self.abs_root_path(action.path)
            path_obj = pathlib.Path(full_path)

            try:
                if action.type == ActionType.ADD:
                    # Check existence *before* writing
                    if path_obj.exists():
                        raise DiffError(f"ADD Error: File already exists: {action.path}")
                    if action.new_content is None:
                        # Parser should ensure this doesn't happen
                        raise DiffError(f"ADD change for {action.path} has no content")

                    self.io.tool_output(f"Adding {action.path}")
                    path_obj.parent.mkdir(parents=True, exist_ok=True)
                    # Ensure single trailing newline, matching reference behavior
                    content_to_write = action.new_content
                    if not content_to_write.endswith("\n"):
                        content_to_write += "\n"
                    self.io.write_text(full_path, content_to_write)

                elif action.type == ActionType.DELETE:
                    self.io.tool_output(f"Deleting {action.path}")
                    if not path_obj.exists():
                        # Missing file is a warning, not an error: the desired
                        # end state (file absent) already holds.
                        self.io.tool_warning(
                            f"DELETE Warning: File not found, skipping: {action.path}"
                        )
                    else:
                        path_obj.unlink()

                elif action.type == ActionType.UPDATE:
                    if not path_obj.exists():
                        raise DiffError(f"UPDATE Error: File does not exist: {action.path}")

                    current_content = self.io.read_text(full_path)
                    if current_content is None:
                        # Should have been caught during parsing if file was needed
                        raise DiffError(f"Could not read file for UPDATE: {action.path}")

                    # Apply the update logic using the parsed chunks
                    new_content = self._apply_update(current_content, action, action.path)

                    target_full_path = (
                        self.abs_root_path(action.move_path) if action.move_path else full_path
                    )
                    target_path_obj = pathlib.Path(target_full_path)

                    if action.move_path:
                        self.io.tool_output(
                            f"Updating and moving {action.path} to {action.move_path}"
                        )
                        # Check if target exists before overwriting/moving
                        if target_path_obj.exists() and full_path != target_full_path:
                            self.io.tool_warning(
                                "UPDATE Warning: Target file for move already exists, overwriting:"
                                f" {action.move_path}"
                            )
                    else:
                        self.io.tool_output(f"Updating {action.path}")

                    # Ensure parent directory exists for target
                    target_path_obj.parent.mkdir(parents=True, exist_ok=True)
                    self.io.write_text(target_full_path, new_content)

                    # Remove original file *after* successful write to new location if moved
                    if action.move_path and full_path != target_full_path:
                        path_obj.unlink()

                else:
                    # Should not happen
                    raise DiffError(f"Unknown action type encountered: {action.type}")

            except (DiffError, FileNotFoundError, IOError, OSError) as e:
                # Raise a ValueError to signal failure, consistent with other coders.
                raise ValueError(f"Error applying action '{action.type}' to {action.path}: {e}")
            except Exception as e:
                # Catch unexpected errors during application
                raise ValueError(
                    f"Unexpected error applying action '{action.type}' to {action.path}: {e}"
                )
|
||||
|
||||
    def _apply_update(self, text: str, action: PatchAction, path: str) -> str:
        """
        Applies UPDATE chunks to the given text content.
        Adapted from _get_updated_file in apply_patch.py.

        :param text: Current content of the file.
        :param action: UPDATE action whose chunks carry absolute orig_index values.
        :param path: File path, used only for error messages.
        :returns: The updated file content, with a single trailing newline.
        :raises DiffError: On overlapping chunks or a mismatch between the
            chunk's expected deleted lines and the actual file content.
        """
        if action.type is not ActionType.UPDATE:
            # Should not be called otherwise, but check for safety
            raise DiffError("_apply_update called with non-update action")

        orig_lines = text.splitlines()  # Use splitlines to handle endings consistently
        dest_lines: List[str] = []
        current_orig_line_idx = 0  # Tracks index in orig_lines processed so far

        # Sort chunks by their original index to apply them sequentially
        sorted_chunks = sorted(action.chunks, key=lambda c: c.orig_index)

        for chunk in sorted_chunks:
            # chunk.orig_index is the absolute line number where the change starts
            # (where the first deleted line was, or where inserted lines go if no deletes)
            chunk_start_index = chunk.orig_index

            if chunk_start_index < current_orig_line_idx:
                # This indicates overlapping chunks or incorrect indices from parsing
                raise DiffError(
                    f"{path}: Overlapping or out-of-order chunk detected."
                    f" Current index {current_orig_line_idx}, chunk starts at {chunk_start_index}."
                )

            # Add lines from original file between the last chunk and this one
            dest_lines.extend(orig_lines[current_orig_line_idx:chunk_start_index])

            # Verify that the lines to be deleted actually match the original file content
            # (The parser should have used find_context, but double-check here)
            num_del = len(chunk.del_lines)
            actual_deleted_lines = orig_lines[chunk_start_index : chunk_start_index + num_del]

            # Use the same normalization as find_context_core for comparison robustness
            norm_chunk_del = [_norm(s).strip() for s in chunk.del_lines]
            norm_actual_del = [_norm(s).strip() for s in actual_deleted_lines]

            if norm_chunk_del != norm_actual_del:
                # This indicates the context matching failed or the file changed since parsing
                # Provide detailed error message
                expected_str = "\n".join(f"- {s}" for s in chunk.del_lines)
                actual_str = "\n".join(f"  {s}" for s in actual_deleted_lines)
                raise DiffError(
                    f"{path}: Mismatch applying patch near line {chunk_start_index + 1}.\n"
                    f"Expected lines to remove:\n{expected_str}\n"
                    f"Found lines in file:\n{actual_str}"
                )

            # Add the inserted lines from the chunk
            dest_lines.extend(chunk.ins_lines)

            # Advance the original line index past the lines processed (deleted lines)
            current_orig_line_idx = chunk_start_index + num_del

        # Add any remaining lines from the original file after the last chunk
        dest_lines.extend(orig_lines[current_orig_line_idx:])

        # Join lines and ensure a single trailing newline
        result = "\n".join(dest_lines)
        if result or orig_lines:  # Add newline unless result is empty and original was empty
            result += "\n"
        return result
|
||||
161
aider/coders/patch_prompts.py
Normal file
161
aider/coders/patch_prompts.py
Normal file
@@ -0,0 +1,161 @@
|
||||
# flake8: noqa: E501

from .base_prompts import CoderPrompts
from .editblock_prompts import EditBlockPrompts


class PatchPrompts(EditBlockPrompts):
    # Prompts instructing the LLM to emit edits in the V4A diff format,
    # bracketed by "*** Begin Patch" / "*** End Patch" sentinels.

    # --------------------------------------------------------------------- #
    # SYSTEM PROMPT
    # --------------------------------------------------------------------- #
    main_system = """Act as an expert software developer.
Always use best practices when coding.
Respect and use existing conventions, libraries, etc that are already present in the code base.
{final_reminders}
Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.

Always reply to the user in {language}.

Once you understand the request you MUST:

1. Decide if you need to propose edits to any files that haven't been added to the chat. You can create new files without asking!

• If you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.
• End your reply and wait for their approval.
• You can keep asking if you then decide you need to edit more files.

2. Think step‑by‑step and explain the needed changes in a few short sentences.

3. Describe the changes using the V4A diff format, enclosed within `*** Begin Patch` and `*** End Patch` markers.

IMPORTANT: Each file MUST appear only once in the patch.
Consolidate **all** edits for a given file into a single `*** [ACTION] File:` block.
{shell_cmd_prompt}
"""

    # --------------------------------------------------------------------- #
    # EXAMPLE MESSAGES
    # --------------------------------------------------------------------- #
    example_messages = [
        # --- Example 1 ---------------------------------------------------- #
        dict(
            role="user",
            content="Change get_factorial() to use math.factorial",
        ),
        dict(
            role="assistant",
            content="""To make this change we need to modify `mathweb/flask/app.py`:

1. Import the `math` module.
2. Replace the custom recursive `factorial()` helper with `math.factorial`.
3. Update `get_factorial()` to call `math.factorial`.

Here is the patch:

*** Begin Patch
*** Update File: mathweb/flask/app.py
@@
-from flask import Flask
-
-from flask import Flask
+from flask import Flask
+import math
@@
-def factorial(n):
-    "compute factorial"
-
-    if n == 0:
-        return 1
-    else:
-        return n * factorial(n-1)
+def factorial(n):
+    "compute factorial"
+
+    return math.factorial(n)
@@ def get_factorial(self, n):
-    return str(factorial(n))
+    return str(math.factorial(n))
*** End Patch
""",
        ),
        # --- Example 2 ---------------------------------------------------- #
        dict(
            role="user",
            content="Refactor hello() into its own file.",
        ),
        dict(
            role="assistant",
            content="""To make this change we need to modify `main.py` and create a new file `hello.py`:

1. Create `hello.py` with the `hello()` function.
2. Remove `hello()` from `main.py` and import it instead.

Here is the patch:

*** Begin Patch
*** Add File: hello.py
+# A simple function
+def hello():
+    "print a greeting"
+
+    print("hello")
*** Update File: main.py
@@
-def hello():
-    "print a greeting"
-
-    print("hello")
+from hello import hello
*** End Patch
""",
        ),
    ]

    # --------------------------------------------------------------------- #
    # SYSTEM REMINDER
    # --------------------------------------------------------------------- #
    system_reminder = """# V4A Diff Format Rules:

Your entire response containing the patch MUST start with `*** Begin Patch` on a line by itself.
Your entire response containing the patch MUST end with `*** End Patch` on a line by itself.

Use the *FULL* file path, as shown to you by the user.
{quad_backtick_reminder}

For each file you need to modify, start with a marker line:

*** [ACTION] File: [path/to/file]

Where `[ACTION]` is one of `Add`, `Update`, or `Delete`.

⇨ **Each file MUST appear only once in the patch.**
Consolidate all changes for that file into the same block.
If you are moving code within a file, include both the deletions and the
insertions as separate hunks inside this single `*** Update File:` block
(do *not* open a second block for the same file).

For `Update` actions, describe each snippet of code that needs to be changed using the following format:
1. Context lines: Include 3 lines of context *before* the change. These lines MUST start with a single space ` `.
2. Lines to remove: Precede each line to be removed with a minus sign `-`.
3. Lines to add: Precede each line to be added with a plus sign `+`.
4. Context lines: Include 3 lines of context *after* the change. These lines MUST start with a single space ` `.

Context lines MUST exactly match the existing file content, character for character, including indentation.
If a change is near the beginning or end of the file, include fewer than 3 context lines as appropriate.
If 3 lines of context is insufficient to uniquely identify the snippet, use `@@ [CLASS_OR_FUNCTION_NAME]` markers on their own lines *before* the context lines to specify the scope. You can use multiple `@@` markers if needed.
Do not include line numbers.

Only create patches for files that the user has added to the chat!

When moving code *within* a single file, keep everything inside one
`*** Update File:` block. Provide one hunk that deletes the code from its
original location and another hunk that inserts it at the new location.

For `Add` actions, use the `*** Add File: [path/to/new/file]` marker, followed by the lines of the new file, each preceded by a plus sign `+`.

For `Delete` actions, use the `*** Delete File: [path/to/file]` marker. No other lines are needed for the deletion.

{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN THE SPECIFIED V4A DIFF FORMAT!
{shell_cmd_reminder}
"""
|
||||
@@ -235,20 +235,6 @@ Left
|
||||
Left
|
||||
"""
|
||||
|
||||
"""
|
||||
ri = RelativeIndenter([example])
|
||||
dump(example)
|
||||
|
||||
rel_example = ri.make_relative(example)
|
||||
dump(repr(rel_example))
|
||||
|
||||
abs_example = ri.make_absolute(rel_example)
|
||||
dump(abs_example)
|
||||
|
||||
|
||||
sys.exit()
|
||||
"""
|
||||
|
||||
|
||||
def relative_indent(texts):
|
||||
ri = RelativeIndenter(texts)
|
||||
@@ -349,7 +335,7 @@ def lines_to_chars(lines, mapping):
|
||||
return new_text
|
||||
|
||||
|
||||
def dmp_lines_apply(texts, remap=True):
|
||||
def dmp_lines_apply(texts):
|
||||
debug = False
|
||||
# debug = True
|
||||
|
||||
@@ -655,8 +641,6 @@ def proc(dname):
|
||||
(dmp_lines_apply, all_preprocs),
|
||||
]
|
||||
|
||||
_strategies = editblock_strategies # noqa: F841
|
||||
|
||||
short_names = dict(
|
||||
search_and_replace="sr",
|
||||
git_cherry_pick_osr_onto_o="cp_o",
|
||||
|
||||
37
aider/coders/shell.py
Normal file
37
aider/coders/shell.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""Shared shell-command prompt fragments used by multiple edit-format prompt classes."""

shell_cmd_prompt = """
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.

Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
Do not suggest multi-line shell commands.
All shell commands will run from the root directory of the user's project.

Use the appropriate shell based on the user's system info:
{platform}
Examples of when to suggest shell commands:

- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
""" # noqa

no_shell_cmd_prompt = """
Keep in mind these details about the user's platform and environment:
{platform}
""" # noqa

shell_cmd_reminder = """
Examples of when to suggest shell commands:

- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.

""" # noqa
|
||||
@@ -45,6 +45,7 @@ other_hunks_applied = (
|
||||
|
||||
class UnifiedDiffCoder(Coder):
|
||||
"""A coder that uses unified diff format for code modifications."""
|
||||
|
||||
edit_format = "udiff"
|
||||
gpt_prompts = UnifiedDiffPrompts()
|
||||
|
||||
@@ -344,7 +345,16 @@ def process_fenced_block(lines, start_line_num):
|
||||
|
||||
if block[0].startswith("--- ") and block[1].startswith("+++ "):
|
||||
# Extract the file path, considering that it might contain spaces
|
||||
fname = block[1][4:].strip()
|
||||
a_fname = block[0][4:].strip()
|
||||
b_fname = block[1][4:].strip()
|
||||
|
||||
# Check if standard git diff prefixes are present (or /dev/null) and strip them
|
||||
if (a_fname.startswith("a/") or a_fname == "/dev/null") and b_fname.startswith("b/"):
|
||||
fname = b_fname[2:]
|
||||
else:
|
||||
# Otherwise, assume the path is as intended
|
||||
fname = b_fname
|
||||
|
||||
block = block[2:]
|
||||
else:
|
||||
fname = None
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from . import shell
|
||||
from .base_prompts import CoderPrompts
|
||||
|
||||
|
||||
class UnifiedDiffPrompts(CoderPrompts):
|
||||
main_system = """Act as an expert software developer.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Always use best practices when coding.
|
||||
Respect and use existing conventions, libraries, etc that are already present in the code base.
|
||||
|
||||
@@ -106,5 +107,9 @@ To move code within a file, use 2 hunks: 1 to delete it from its current locatio
|
||||
|
||||
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = shell.shell_cmd_prompt
|
||||
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
|
||||
shell_cmd_reminder = shell.shell_cmd_reminder
|
||||
|
||||
14
aider/coders/udiff_simple.py
Normal file
14
aider/coders/udiff_simple.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from .udiff_coder import UnifiedDiffCoder
from .udiff_simple_prompts import UnifiedDiffSimplePrompts


class UnifiedDiffSimpleCoder(UnifiedDiffCoder):
    """
    A coder that uses unified diff format for code modifications.
    This variant uses a simpler prompt that doesn't mention specific
    diff rules like using `@@ ... @@` lines or avoiding line numbers.
    """

    # Identifier used to select this edit format (e.g. --edit-format udiff-simple).
    edit_format = "udiff-simple"

    # Simplified prompt set; editing logic is inherited from UnifiedDiffCoder.
    gpt_prompts = UnifiedDiffSimplePrompts()
||||
25
aider/coders/udiff_simple_prompts.py
Normal file
25
aider/coders/udiff_simple_prompts.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from .udiff_prompts import UnifiedDiffPrompts
|
||||
|
||||
|
||||
class UnifiedDiffSimplePrompts(UnifiedDiffPrompts):
|
||||
"""
|
||||
Prompts for the UnifiedDiffSimpleCoder.
|
||||
Inherits from UnifiedDiffPrompts and can override specific prompts
|
||||
if a simpler wording is desired for this edit format.
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
|
||||
system_reminder = """# File editing rules:
|
||||
|
||||
Return edits similar to unified diffs that `diff -U0` would produce.
|
||||
|
||||
The user's patch tool needs CORRECT patches that apply cleanly against the current contents of the file!
|
||||
Think carefully and make sure you include and mark all lines that need to be removed or changed as `-` lines.
|
||||
Make sure you mark all new or modified lines with `+`.
|
||||
Don't leave out any lines or the diff patch won't apply correctly.
|
||||
|
||||
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
|
||||
|
||||
{final_reminders}
|
||||
""" # noqa
|
||||
@@ -10,7 +10,7 @@ If the request is ambiguous, ask questions.
|
||||
|
||||
Always reply to the user in {language}.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Once you understand the request you MUST:
|
||||
1. Determine if any code changes are needed.
|
||||
2. Explain any needed changes.
|
||||
@@ -61,7 +61,7 @@ To suggest changes to a file you MUST return a *file listing* that contains the
|
||||
*NEVER* skip, omit or elide content from a *file listing* using "..." or by adding comments like "... rest of code..."!
|
||||
Create a new file you MUST return a *file listing* which includes an appropriate filename, including any appropriate path.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
"""
|
||||
|
||||
redacted_edit_message = "No changes are needed."
|
||||
|
||||
@@ -47,6 +47,7 @@ class Commands:
|
||||
parser=self.parser,
|
||||
verbose=self.verbose,
|
||||
editor=self.editor,
|
||||
original_read_only_fnames=self.original_read_only_fnames,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
@@ -220,12 +221,18 @@ class Commands:
|
||||
|
||||
self.io.tool_output(f"Scraping {url}...")
|
||||
if not self.scraper:
|
||||
res = install_playwright(self.io)
|
||||
if not res:
|
||||
self.io.tool_warning("Unable to initialize playwright.")
|
||||
disable_playwright = getattr(self.args, "disable_playwright", False)
|
||||
if disable_playwright:
|
||||
res = False
|
||||
else:
|
||||
res = install_playwright(self.io)
|
||||
if not res:
|
||||
self.io.tool_warning("Unable to initialize playwright.")
|
||||
|
||||
self.scraper = Scraper(
|
||||
print_error=self.io.tool_error, playwright_available=res, verify_ssl=self.verify_ssl
|
||||
print_error=self.io.tool_error,
|
||||
playwright_available=res,
|
||||
verify_ssl=self.verify_ssl,
|
||||
)
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
|
||||
@@ -11,7 +11,7 @@ from aider.coders import Coder
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.io import InputOutput
|
||||
from aider.main import main as cli_main
|
||||
from aider.scrape import Scraper
|
||||
from aider.scrape import Scraper, has_playwright
|
||||
|
||||
|
||||
class CaptureIO(InputOutput):
|
||||
@@ -484,7 +484,7 @@ class GUI:
|
||||
url = self.web_content
|
||||
|
||||
if not self.state.scraper:
|
||||
self.scraper = Scraper(print_error=self.info)
|
||||
self.scraper = Scraper(print_error=self.info, playwright_available=has_playwright())
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
if content.strip():
|
||||
|
||||
11
aider/io.py
11
aider/io.py
@@ -595,7 +595,7 @@ class InputOutput:
|
||||
current_text = buffer.text
|
||||
|
||||
# Open the editor with the current text
|
||||
edited_text = pipe_editor(input_data=current_text)
|
||||
edited_text = pipe_editor(input_data=current_text, suffix="md")
|
||||
|
||||
# Replace the buffer with the edited text, strip any trailing newlines
|
||||
buffer.text = edited_text.rstrip("\n")
|
||||
@@ -1144,18 +1144,19 @@ class InputOutput:
|
||||
ro_paths = []
|
||||
for rel_path in read_only_files:
|
||||
abs_path = os.path.abspath(os.path.join(self.root, rel_path))
|
||||
ro_paths.append(abs_path if len(abs_path) < len(rel_path) else rel_path)
|
||||
ro_paths.append(Text(abs_path if len(abs_path) < len(rel_path) else rel_path))
|
||||
|
||||
files_with_label = ["Readonly:"] + ro_paths
|
||||
files_with_label = [Text("Readonly:")] + ro_paths
|
||||
read_only_output = StringIO()
|
||||
Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
|
||||
read_only_lines = read_only_output.getvalue().splitlines()
|
||||
console.print(Columns(files_with_label))
|
||||
|
||||
if editable_files:
|
||||
files_with_label = editable_files
|
||||
text_editable_files = [Text(f) for f in editable_files]
|
||||
files_with_label = text_editable_files
|
||||
if read_only_files:
|
||||
files_with_label = ["Editable:"] + editable_files
|
||||
files_with_label = [Text("Editable:")] + text_editable_files
|
||||
editable_output = StringIO()
|
||||
Console(file=editable_output, force_terminal=False).print(Columns(files_with_label))
|
||||
editable_lines = editable_output.getvalue().splitlines()
|
||||
|
||||
@@ -4,10 +4,10 @@ import subprocess
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
import shlex
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import oslex
|
||||
from grep_ast import TreeContext, filename_to_lang
|
||||
from grep_ast.tsl import get_parser # noqa: E402
|
||||
|
||||
@@ -45,7 +45,7 @@ class Linter:
|
||||
return fname
|
||||
|
||||
def run_cmd(self, cmd, rel_fname, code):
|
||||
cmd += " " + shlex.quote(rel_fname)
|
||||
cmd += " " + oslex.quote(rel_fname)
|
||||
|
||||
returncode = 0
|
||||
stdout = ""
|
||||
|
||||
@@ -14,6 +14,7 @@ except ImportError:
|
||||
git = None
|
||||
|
||||
import importlib_resources
|
||||
import shtab
|
||||
from dotenv import load_dotenv
|
||||
from prompt_toolkit.enums import EditingMode
|
||||
|
||||
@@ -502,6 +503,12 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
# Parse again to include any arguments that might have been defined in .env
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
if args.shell_completions:
|
||||
# Ensure parser.prog is set for shtab, though it should be by default
|
||||
parser.prog = "aider"
|
||||
print(shtab.complete(parser, shell=args.shell_completions))
|
||||
sys.exit(0)
|
||||
|
||||
if git is None:
|
||||
args.git = False
|
||||
|
||||
@@ -857,7 +864,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
)
|
||||
|
||||
if args.copy_paste and args.edit_format is None:
|
||||
if main_model.edit_format in ("diff", "whole"):
|
||||
if main_model.edit_format in ("diff", "whole", "diff-fenced"):
|
||||
main_model.edit_format = "editor-" + main_model.edit_format
|
||||
|
||||
if args.verbose:
|
||||
@@ -904,6 +911,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
commit_prompt=args.commit_prompt,
|
||||
subtree_only=args.subtree_only,
|
||||
git_commit_verify=args.git_commit_verify,
|
||||
attribute_co_authored_by=args.attribute_co_authored_by, # Pass the arg
|
||||
)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
@@ -115,9 +115,9 @@ class MarkdownStream:
|
||||
else:
|
||||
self.mdargs = dict()
|
||||
|
||||
# Initialize rich Live display with empty text
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
# Defer Live creation until the first update.
|
||||
self.live = None
|
||||
self._live_started = False
|
||||
|
||||
def _render_markdown_to_lines(self, text):
|
||||
"""Render markdown text to a list of lines.
|
||||
@@ -163,6 +163,12 @@ class MarkdownStream:
|
||||
Markdown going to the console works better in terminal scrollback buffers.
|
||||
The live window doesn't play nice with terminal scrollback.
|
||||
"""
|
||||
# On the first call, stop the spinner and start the Live renderer
|
||||
if not getattr(self, "_live_started", False):
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
self._live_started = True
|
||||
|
||||
now = time.time()
|
||||
# Throttle updates to maintain smooth rendering
|
||||
if not final and now - self.when < self.min_delay:
|
||||
|
||||
154
aider/models.py
154
aider/models.py
@@ -88,11 +88,11 @@ MODEL_ALIASES = {
|
||||
"3": "gpt-3.5-turbo",
|
||||
# Other models
|
||||
"deepseek": "deepseek/deepseek-chat",
|
||||
"flash": "gemini/gemini-2.0-flash-exp",
|
||||
"flash": "gemini/gemini-2.5-flash-preview-04-17",
|
||||
"quasar": "openrouter/openrouter/quasar-alpha",
|
||||
"r1": "deepseek/deepseek-reasoner",
|
||||
"gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",
|
||||
"gemini": "gemini/gemini-2.5-pro-preview-03-25",
|
||||
"gemini-2.5-pro": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
|
||||
"grok3": "xai/grok-3-beta",
|
||||
"optimus": "openrouter/openrouter/optimus-alpha",
|
||||
@@ -231,8 +231,62 @@ class ModelInfoManager:
|
||||
if litellm_info:
|
||||
return litellm_info
|
||||
|
||||
if not cached_info and model.startswith("openrouter/"):
|
||||
openrouter_info = self.fetch_openrouter_model_info(model)
|
||||
if openrouter_info:
|
||||
return openrouter_info
|
||||
|
||||
return cached_info
|
||||
|
||||
def fetch_openrouter_model_info(self, model):
|
||||
"""
|
||||
Fetch model info by scraping the openrouter model page.
|
||||
Expected URL: https://openrouter.ai/<model_route>
|
||||
Example: openrouter/qwen/qwen-2.5-72b-instruct:free
|
||||
Returns a dict with keys: max_tokens, max_input_tokens, max_output_tokens,
|
||||
input_cost_per_token, output_cost_per_token.
|
||||
"""
|
||||
url_part = model[len("openrouter/") :]
|
||||
url = "https://openrouter.ai/" + url_part
|
||||
try:
|
||||
import requests
|
||||
|
||||
response = requests.get(url, timeout=5, verify=self.verify_ssl)
|
||||
if response.status_code != 200:
|
||||
return {}
|
||||
html = response.text
|
||||
import re
|
||||
|
||||
if re.search(
|
||||
rf"The model\s*.*{re.escape(url_part)}.* is not available", html, re.IGNORECASE
|
||||
):
|
||||
print(f"\033[91mError: Model '{url_part}' is not available\033[0m")
|
||||
return {}
|
||||
text = re.sub(r"<[^>]+>", " ", html)
|
||||
context_match = re.search(r"([\d,]+)\s*context", text)
|
||||
if context_match:
|
||||
context_str = context_match.group(1).replace(",", "")
|
||||
context_size = int(context_str)
|
||||
else:
|
||||
context_size = None
|
||||
input_cost_match = re.search(r"\$\s*([\d.]+)\s*/M input tokens", text, re.IGNORECASE)
|
||||
output_cost_match = re.search(r"\$\s*([\d.]+)\s*/M output tokens", text, re.IGNORECASE)
|
||||
input_cost = float(input_cost_match.group(1)) / 1000000 if input_cost_match else None
|
||||
output_cost = float(output_cost_match.group(1)) / 1000000 if output_cost_match else None
|
||||
if context_size is None or input_cost is None or output_cost is None:
|
||||
return {}
|
||||
params = {
|
||||
"max_input_tokens": context_size,
|
||||
"max_tokens": context_size,
|
||||
"max_output_tokens": context_size,
|
||||
"input_cost_per_token": input_cost,
|
||||
"output_cost_per_token": output_cost,
|
||||
}
|
||||
return params
|
||||
except Exception as e:
|
||||
print("Error fetching openrouter info:", str(e))
|
||||
return {}
|
||||
|
||||
|
||||
model_info_manager = ModelInfoManager()
|
||||
|
||||
@@ -314,7 +368,11 @@ class Model(ModelSettings):
|
||||
self.apply_generic_model_settings(model)
|
||||
|
||||
# Apply override settings last if they exist
|
||||
if self.extra_model_settings and self.extra_model_settings.extra_params:
|
||||
if (
|
||||
self.extra_model_settings
|
||||
and self.extra_model_settings.extra_params
|
||||
and self.extra_model_settings.name == "aider/extra_params"
|
||||
):
|
||||
# Initialize extra_params if it doesn't exist
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
@@ -328,16 +386,40 @@ class Model(ModelSettings):
|
||||
# For non-dict values, simply update
|
||||
self.extra_params[key] = value
|
||||
|
||||
# Ensure OpenRouter models accept thinking_tokens and reasoning_effort
|
||||
if self.name.startswith("openrouter/"):
|
||||
if self.accepts_settings is None:
|
||||
self.accepts_settings = []
|
||||
if "thinking_tokens" not in self.accepts_settings:
|
||||
self.accepts_settings.append("thinking_tokens")
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
|
||||
def apply_generic_model_settings(self, model):
|
||||
if "/o3-mini" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.use_temperature = False
|
||||
self.system_prompt_prefix = "Formatting re-enabled. "
|
||||
self.system_prompt_prefix = "Formatting re-enabled. "
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
return # <--
|
||||
|
||||
if "gpt-4.1-mini" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.reminder = "sys"
|
||||
self.examples_as_sys_msg = False
|
||||
return # <--
|
||||
|
||||
if "gpt-4.1" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.reminder = "sys"
|
||||
self.examples_as_sys_msg = False
|
||||
return # <--
|
||||
|
||||
if "/o1-mini" in model:
|
||||
self.use_repo_map = True
|
||||
self.use_temperature = False
|
||||
@@ -441,6 +523,14 @@ class Model(ModelSettings):
|
||||
self.extra_params = dict(top_p=0.95)
|
||||
return # <--
|
||||
|
||||
if "qwen3" in model and "235b" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.system_prompt_prefix = "/no_think"
|
||||
self.use_temperature = 0.7
|
||||
self.extra_params = {"top_p": 0.8, "top_k": 20, "min_p": 0.0}
|
||||
return # <--
|
||||
|
||||
# use the defaults
|
||||
if self.edit_format == "diff":
|
||||
self.use_repo_map = True
|
||||
@@ -488,6 +578,8 @@ class Model(ModelSettings):
|
||||
|
||||
if not self.editor_edit_format:
|
||||
self.editor_edit_format = self.editor_model.edit_format
|
||||
if self.editor_edit_format in ("diff", "whole", "diff-fenced"):
|
||||
self.editor_edit_format = "editor-" + self.editor_edit_format
|
||||
|
||||
return self.editor_model
|
||||
|
||||
@@ -638,11 +730,18 @@ class Model(ModelSettings):
|
||||
def set_reasoning_effort(self, effort):
|
||||
"""Set the reasoning effort parameter for models that support it"""
|
||||
if effort is not None:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning_effort"] = effort
|
||||
if self.name.startswith("openrouter/"):
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"effort": effort}
|
||||
else:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning_effort"] = effort
|
||||
|
||||
def parse_token_value(self, value):
|
||||
"""
|
||||
@@ -688,7 +787,9 @@ class Model(ModelSettings):
|
||||
|
||||
# OpenRouter models use 'reasoning' instead of 'thinking'
|
||||
if self.name.startswith("openrouter/"):
|
||||
self.extra_params["reasoning"] = {"max_tokens": num_tokens}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"max_tokens": num_tokens}
|
||||
else:
|
||||
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
|
||||
|
||||
@@ -698,8 +799,13 @@ class Model(ModelSettings):
|
||||
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if "reasoning" in self.extra_params and "max_tokens" in self.extra_params["reasoning"]:
|
||||
budget = self.extra_params["reasoning"]["max_tokens"]
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "max_tokens" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
budget = self.extra_params["extra_body"]["reasoning"]["max_tokens"]
|
||||
# Check for standard thinking format
|
||||
elif (
|
||||
"thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
|
||||
@@ -729,12 +835,21 @@ class Model(ModelSettings):
|
||||
|
||||
def get_reasoning_effort(self):
|
||||
"""Get reasoning effort value if available"""
|
||||
if (
|
||||
self.extra_params
|
||||
and "extra_body" in self.extra_params
|
||||
and "reasoning_effort" in self.extra_params["extra_body"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning_effort"]
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "effort" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning"]["effort"]
|
||||
# Check for standard reasoning_effort format (e.g. in extra_body)
|
||||
elif (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning_effort" in self.extra_params["extra_body"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning_effort"]
|
||||
return None
|
||||
|
||||
def is_deepseek_r1(self):
|
||||
@@ -798,6 +913,9 @@ class Model(ModelSettings):
|
||||
messages = ensure_alternating_roles(messages)
|
||||
retry_delay = 0.125
|
||||
|
||||
if self.verbose:
|
||||
dump(messages)
|
||||
|
||||
while True:
|
||||
try:
|
||||
kwargs = {
|
||||
|
||||
@@ -13,11 +13,13 @@ Generate a one-line commit message for those changes.
|
||||
The commit message should be structured as follows: <type>: <description>
|
||||
Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf, test
|
||||
|
||||
Ensure the commit message:
|
||||
Ensure the commit message:{language_instruction}
|
||||
- Starts with the appropriate prefix.
|
||||
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
|
||||
- Does not exceed 72 characters.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, or line breaks.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, \
|
||||
or line breaks.
|
||||
"""
|
||||
|
||||
115
aider/queries/tree-sitter-language-pack/ocaml-tags.scm
Normal file
115
aider/queries/tree-sitter-language-pack/ocaml-tags.scm
Normal file
@@ -0,0 +1,115 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition (module_binding (module_name) @name.definition.module) @definition.module)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name.reference.module) @reference.module
|
||||
|
||||
; Module types
|
||||
;--------------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name.definition.interface) @definition.interface
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name.reference.implementation) @reference.implementation
|
||||
|
||||
; Functions
|
||||
;----------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
[
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
(parameter))
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
body: [(fun_expression) (function_expression)])
|
||||
] @definition.function
|
||||
)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name.definition.function) @definition.function
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(application_expression
|
||||
function: (value_path (value_name) @name.reference.call)) @reference.call
|
||||
|
||||
(infix_expression
|
||||
left: (value_path (value_name) @name.reference.call)
|
||||
operator: (concat_operator) @reference.call
|
||||
(#eq? @reference.call "@@"))
|
||||
|
||||
(infix_expression
|
||||
operator: (rel_operator) @reference.call
|
||||
right: (value_path (value_name) @name.reference.call)
|
||||
(#eq? @reference.call "|>"))
|
||||
|
||||
; Operator
|
||||
;---------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
(let_binding
|
||||
pattern: (parenthesized_operator (_) @name.definition.function)) @definition.function)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(prefix_operator)
|
||||
(sign_operator)
|
||||
(pow_operator)
|
||||
(mult_operator)
|
||||
(add_operator)
|
||||
(concat_operator)
|
||||
(rel_operator)
|
||||
(and_operator)
|
||||
(or_operator)
|
||||
(assign_operator)
|
||||
(hash_operator)
|
||||
(indexing_operator)
|
||||
(let_operator)
|
||||
(let_and_operator)
|
||||
(match_operator)
|
||||
] @name.reference.call @reference.call
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition (class_binding (class_name) @name.definition.class) @definition.class)
|
||||
(class_type_definition (class_type_binding (class_type_name) @name.definition.class) @definition.class)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name.reference.class)
|
||||
(class_type_path (class_type_name) @name.reference.class)
|
||||
] @reference.class
|
||||
|
||||
; Methods
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name.definition.method) @definition.method
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name.reference.call) @reference.call
|
||||
@@ -0,0 +1,98 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
98
aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
Normal file
98
aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
Normal file
@@ -0,0 +1,98 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
208
aider/repo.py
208
aider/repo.py
@@ -1,3 +1,4 @@
|
||||
import contextlib
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path, PurePosixPath
|
||||
@@ -34,6 +35,19 @@ ANY_GIT_ERROR += [
|
||||
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_git_env(var_name, value, original_value):
|
||||
"""Temporarily set a Git environment variable."""
|
||||
os.environ[var_name] = value
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if original_value is not None:
|
||||
os.environ[var_name] = original_value
|
||||
elif var_name in os.environ:
|
||||
del os.environ[var_name]
|
||||
|
||||
|
||||
class GitRepo:
|
||||
repo = None
|
||||
aider_ignore_file = None
|
||||
@@ -58,6 +72,7 @@ class GitRepo:
|
||||
commit_prompt=None,
|
||||
subtree_only=False,
|
||||
git_commit_verify=True,
|
||||
attribute_co_authored_by=False, # Added parameter
|
||||
):
|
||||
self.io = io
|
||||
self.models = models
|
||||
@@ -69,6 +84,7 @@ class GitRepo:
|
||||
self.attribute_committer = attribute_committer
|
||||
self.attribute_commit_message_author = attribute_commit_message_author
|
||||
self.attribute_commit_message_committer = attribute_commit_message_committer
|
||||
self.attribute_co_authored_by = attribute_co_authored_by # Assign from parameter
|
||||
self.commit_prompt = commit_prompt
|
||||
self.subtree_only = subtree_only
|
||||
self.git_commit_verify = git_commit_verify
|
||||
@@ -111,7 +127,76 @@ class GitRepo:
|
||||
if aider_ignore_file:
|
||||
self.aider_ignore_file = Path(aider_ignore_file)
|
||||
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False):
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False, coder=None):
|
||||
"""
|
||||
Commit the specified files or all dirty files if none are specified.
|
||||
|
||||
Args:
|
||||
fnames (list, optional): List of filenames to commit. Defaults to None (commit all
|
||||
dirty files).
|
||||
context (str, optional): Context for generating commit message. Defaults to None.
|
||||
message (str, optional): Explicit commit message. Defaults to None (generate message).
|
||||
aider_edits (bool, optional): Whether the changes were made by Aider. Defaults to False.
|
||||
This affects attribution logic.
|
||||
coder (Coder, optional): The Coder instance, used for config and model info.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
tuple(str, str) or None: The commit hash and commit message if successful,
|
||||
else None.
|
||||
|
||||
Attribution Logic:
|
||||
------------------
|
||||
This method handles Git commit attribution based on configuration flags and whether
|
||||
Aider generated the changes (`aider_edits`).
|
||||
|
||||
Key Concepts:
|
||||
- Author: The person who originally wrote the code changes.
|
||||
- Committer: The person who last applied the commit to the repository.
|
||||
- aider_edits=True: Changes were generated by Aider (LLM).
|
||||
- aider_edits=False: Commit is user-driven (e.g., /commit manually staged changes).
|
||||
- Explicit Setting: A flag (--attribute-...) is set to True or False
|
||||
via command line or config file.
|
||||
- Implicit Default: A flag is not explicitly set, defaulting to None in args, which is
|
||||
interpreted as True unless overridden by other logic.
|
||||
|
||||
Flags:
|
||||
- --attribute-author: Modify Author name to "User Name (aider)".
|
||||
- --attribute-committer: Modify Committer name to "User Name (aider)".
|
||||
- --attribute-co-authored-by: Add
|
||||
"Co-authored-by: aider (<model>) <noreply@aider.chat>" trailer to commit message.
|
||||
|
||||
Behavior Summary:
|
||||
|
||||
1. When aider_edits = True (AI Changes):
|
||||
- If --attribute-co-authored-by=True:
|
||||
- Co-authored-by trailer IS ADDED.
|
||||
- Author/Committer names are NOT modified by default (co-authored-by takes precedence).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY True, the
|
||||
respective name IS modified (explicit overrides precedence).
|
||||
- If --attribute-co-authored-by=False:
|
||||
- Co-authored-by trailer is NOT added.
|
||||
- Author/Committer names ARE modified by default (implicit True).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY False,
|
||||
the respective name is NOT modified.
|
||||
|
||||
2. When aider_edits = False (User Changes):
|
||||
- --attribute-co-authored-by is IGNORED (trailer never added).
|
||||
- Author name is NEVER modified (--attribute-author ignored).
|
||||
- Committer name IS modified by default (implicit True, as Aider runs `git commit`).
|
||||
- EXCEPTION: If --attribute-committer is EXPLICITLY False, the name is NOT modified.
|
||||
|
||||
Resulting Scenarios:
|
||||
- Standard AI edit (defaults): Co-authored-by=False -> Author=You(aider),
|
||||
Committer=You(aider)
|
||||
- AI edit with Co-authored-by (default): Co-authored-by=True -> Author=You,
|
||||
Committer=You, Trailer added
|
||||
- AI edit with Co-authored-by + Explicit Author: Co-authored-by=True,
|
||||
--attribute-author -> Author=You(aider), Committer=You, Trailer added
|
||||
- User commit (defaults): aider_edits=False -> Author=You, Committer=You(aider)
|
||||
- User commit with explicit no-committer: aider_edits=False,
|
||||
--no-attribute-committer -> Author=You, Committer=You
|
||||
"""
|
||||
if not fnames and not self.repo.is_dirty():
|
||||
return
|
||||
|
||||
@@ -122,19 +207,71 @@ class GitRepo:
|
||||
if message:
|
||||
commit_message = message
|
||||
else:
|
||||
commit_message = self.get_commit_message(diffs, context)
|
||||
user_language = None
|
||||
if coder:
|
||||
user_language = coder.get_user_language()
|
||||
commit_message = self.get_commit_message(diffs, context, user_language)
|
||||
|
||||
if aider_edits and self.attribute_commit_message_author:
|
||||
commit_message = "aider: " + commit_message
|
||||
elif self.attribute_commit_message_committer:
|
||||
commit_message = "aider: " + commit_message
|
||||
# Retrieve attribute settings, prioritizing coder.args if available
|
||||
if coder and hasattr(coder, "args"):
|
||||
attribute_author = coder.args.attribute_author
|
||||
attribute_committer = coder.args.attribute_committer
|
||||
attribute_commit_message_author = coder.args.attribute_commit_message_author
|
||||
attribute_commit_message_committer = coder.args.attribute_commit_message_committer
|
||||
attribute_co_authored_by = coder.args.attribute_co_authored_by
|
||||
else:
|
||||
# Fallback to self attributes (initialized from config/defaults)
|
||||
attribute_author = self.attribute_author
|
||||
attribute_committer = self.attribute_committer
|
||||
attribute_commit_message_author = self.attribute_commit_message_author
|
||||
attribute_commit_message_committer = self.attribute_commit_message_committer
|
||||
attribute_co_authored_by = self.attribute_co_authored_by
|
||||
|
||||
# Determine explicit settings (None means use default behavior)
|
||||
author_explicit = attribute_author is not None
|
||||
committer_explicit = attribute_committer is not None
|
||||
|
||||
# Determine effective settings (apply default True if not explicit)
|
||||
effective_author = True if attribute_author is None else attribute_author
|
||||
effective_committer = True if attribute_committer is None else attribute_committer
|
||||
|
||||
# Determine commit message prefixing
|
||||
prefix_commit_message = aider_edits and (
|
||||
attribute_commit_message_author or attribute_commit_message_committer
|
||||
)
|
||||
|
||||
# Determine Co-authored-by trailer
|
||||
commit_message_trailer = ""
|
||||
if aider_edits and attribute_co_authored_by:
|
||||
model_name = "unknown-model"
|
||||
if coder and hasattr(coder, "main_model") and coder.main_model.name:
|
||||
model_name = coder.main_model.name
|
||||
commit_message_trailer = (
|
||||
f"\n\nCo-authored-by: aider ({model_name}) <noreply@aider.chat>"
|
||||
)
|
||||
|
||||
# Determine if author/committer names should be modified
|
||||
# Author modification applies only to aider edits.
|
||||
# It's used if effective_author is True AND
|
||||
# (co-authored-by is False OR author was explicitly set).
|
||||
use_attribute_author = (
|
||||
aider_edits and effective_author and (not attribute_co_authored_by or author_explicit)
|
||||
)
|
||||
|
||||
# Committer modification applies regardless of aider_edits (based on tests).
|
||||
# It's used if effective_committer is True AND
|
||||
# (it's not an aider edit with co-authored-by OR committer was explicitly set).
|
||||
use_attribute_committer = effective_committer and (
|
||||
not (aider_edits and attribute_co_authored_by) or committer_explicit
|
||||
)
|
||||
|
||||
if not commit_message:
|
||||
commit_message = "(no commit message provided)"
|
||||
|
||||
full_commit_message = commit_message
|
||||
# if context:
|
||||
# full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
|
||||
if prefix_commit_message:
|
||||
commit_message = "aider: " + commit_message
|
||||
|
||||
full_commit_message = commit_message + commit_message_trailer
|
||||
|
||||
cmd = ["-m", full_commit_message]
|
||||
if not self.git_commit_verify:
|
||||
@@ -152,36 +289,32 @@ class GitRepo:
|
||||
|
||||
original_user_name = self.repo.git.config("--get", "user.name")
|
||||
original_committer_name_env = os.environ.get("GIT_COMMITTER_NAME")
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
committer_name = f"{original_user_name} (aider)"
|
||||
|
||||
if self.attribute_committer:
|
||||
os.environ["GIT_COMMITTER_NAME"] = committer_name
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
os.environ["GIT_AUTHOR_NAME"] = committer_name
|
||||
|
||||
try:
|
||||
self.repo.git.commit(cmd)
|
||||
commit_hash = self.get_head_commit_sha(short=True)
|
||||
self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
|
||||
return commit_hash, commit_message
|
||||
# Use context managers to handle environment variables
|
||||
with contextlib.ExitStack() as stack:
|
||||
if use_attribute_committer:
|
||||
stack.enter_context(
|
||||
set_git_env(
|
||||
"GIT_COMMITTER_NAME", committer_name, original_committer_name_env
|
||||
)
|
||||
)
|
||||
if use_attribute_author:
|
||||
stack.enter_context(
|
||||
set_git_env("GIT_AUTHOR_NAME", committer_name, original_author_name_env)
|
||||
)
|
||||
|
||||
# Perform the commit
|
||||
self.repo.git.commit(cmd)
|
||||
commit_hash = self.get_head_commit_sha(short=True)
|
||||
self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
|
||||
return commit_hash, commit_message
|
||||
|
||||
except ANY_GIT_ERROR as err:
|
||||
self.io.tool_error(f"Unable to commit: {err}")
|
||||
finally:
|
||||
# Restore the env
|
||||
|
||||
if self.attribute_committer:
|
||||
if original_committer_name_env is not None:
|
||||
os.environ["GIT_COMMITTER_NAME"] = original_committer_name_env
|
||||
else:
|
||||
del os.environ["GIT_COMMITTER_NAME"]
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
if original_author_name_env is not None:
|
||||
os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
|
||||
else:
|
||||
del os.environ["GIT_AUTHOR_NAME"]
|
||||
# No return here, implicitly returns None
|
||||
|
||||
def get_rel_repo_dir(self):
|
||||
try:
|
||||
@@ -189,7 +322,7 @@ class GitRepo:
|
||||
except (ValueError, OSError):
|
||||
return self.repo.git_dir
|
||||
|
||||
def get_commit_message(self, diffs, context):
|
||||
def get_commit_message(self, diffs, context, user_language=None):
|
||||
diffs = "# Diffs:\n" + diffs
|
||||
|
||||
content = ""
|
||||
@@ -198,6 +331,11 @@ class GitRepo:
|
||||
content += diffs
|
||||
|
||||
system_content = self.commit_prompt or prompts.commit_system
|
||||
language_instruction = ""
|
||||
if user_language:
|
||||
language_instruction = f"\n- Is written in {user_language}."
|
||||
system_content = system_content.format(language_instruction=language_instruction)
|
||||
|
||||
messages = [
|
||||
dict(role="system", content=system_content),
|
||||
dict(role="user", content=content),
|
||||
|
||||
@@ -35,6 +35,8 @@ CACHE_VERSION = 3
|
||||
if USING_TSL_PACK:
|
||||
CACHE_VERSION = 4
|
||||
|
||||
UPDATING_REPO_MAP_MESSAGE = "Updating repo map"
|
||||
|
||||
|
||||
class RepoMap:
|
||||
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
|
||||
@@ -380,7 +382,7 @@ class RepoMap:
|
||||
if self.verbose:
|
||||
self.io.tool_output(f"Processing {fname}")
|
||||
if progress and not showing_bar:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {fname}")
|
||||
|
||||
try:
|
||||
file_ok = Path(fname).is_file()
|
||||
@@ -459,7 +461,7 @@ class RepoMap:
|
||||
|
||||
for ident in idents:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {ident}")
|
||||
|
||||
definers = defines[ident]
|
||||
|
||||
@@ -512,7 +514,7 @@ class RepoMap:
|
||||
ranked_definitions = defaultdict(float)
|
||||
for src in G.nodes:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {src}")
|
||||
|
||||
src_rank = ranked[src]
|
||||
total_weight = sum(data["weight"] for _src, _dst, data in G.out_edges(src, data=True))
|
||||
@@ -621,7 +623,7 @@ class RepoMap:
|
||||
if not mentioned_idents:
|
||||
mentioned_idents = set()
|
||||
|
||||
spin = Spinner("Updating repo map")
|
||||
spin = Spinner(UPDATING_REPO_MAP_MESSAGE)
|
||||
|
||||
ranked_tags = self.get_ranked_tags(
|
||||
chat_fnames,
|
||||
@@ -655,7 +657,11 @@ class RepoMap:
|
||||
while lower_bound <= upper_bound:
|
||||
# dump(lower_bound, middle, upper_bound)
|
||||
|
||||
spin.step()
|
||||
if middle > 1500:
|
||||
show_tokens = f"{middle / 1000.0:.1f}K"
|
||||
else:
|
||||
show_tokens = str(middle)
|
||||
spin.step(f"{UPDATING_REPO_MAP_MESSAGE}: {show_tokens} tokens")
|
||||
|
||||
tree = self.to_tree(ranked_tags[:middle], chat_rel_fnames)
|
||||
num_tokens = self.token_count(tree)
|
||||
|
||||
@@ -15,22 +15,6 @@
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-r1": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.00000055,
|
||||
"input_cost_per_token_cache_hit": 0.00000014,
|
||||
"cache_read_input_token_cost": 0.00000014,
|
||||
"cache_creation_input_token_cost": 0.0,
|
||||
"output_cost_per_token": 0.00000219,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
//"supports_function_calling": true,
|
||||
"supports_assistant_prefill": true,
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-r1:free": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
@@ -99,15 +83,6 @@
|
||||
"output_cost_per_token": 0.000008,
|
||||
"mode": "chat",
|
||||
},
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-v3": {
|
||||
"max_tokens": 128000,
|
||||
"max_input_tokens": 100000,
|
||||
"max_output_tokens": 8192,
|
||||
"litellm_provider": "fireworks_ai",
|
||||
"input_cost_per_token": 0.0000009,
|
||||
"output_cost_per_token": 0.0000009,
|
||||
"mode": "chat",
|
||||
},
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": {
|
||||
"max_tokens": 160000,
|
||||
"max_input_tokens": 100000,
|
||||
@@ -117,54 +92,6 @@
|
||||
"output_cost_per_token": 0.0000009,
|
||||
"mode": "chat",
|
||||
},
|
||||
"o3-mini": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openai/o3-mini": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openai/o3-mini-high": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openrouter/quasar-alpha": {
|
||||
"max_input_tokens": 1000000,
|
||||
"max_output_tokens": 32000,
|
||||
@@ -203,26 +130,6 @@
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true
|
||||
},
|
||||
"claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "anthropic",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"anthropic/claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
@@ -243,43 +150,6 @@
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openrouter/anthropic/claude-3.7-sonnet": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"input_cost_per_token": 0.000075,
|
||||
"output_cost_per_token": 0.00015,
|
||||
"cache_read_input_token_cost": 0.0000375,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openai/gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
@@ -334,42 +204,6 @@
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"gemini/gemini-2.5-pro-preview-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0.000010,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"vertex_ai/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
@@ -478,7 +312,7 @@
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"openrouter/google/gemini-2.5-pro-exp-03-25:free": {
|
||||
"openrouter/google/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
@@ -523,15 +357,6 @@
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"xai/grok-3-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"litellm_provider": "xai",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-mini-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
@@ -541,13 +366,22 @@
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"xai/grok-3-mini-beta": {
|
||||
"openrouter/x-ai/grok-3-fast-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.0000003,
|
||||
"output_cost_per_token": 0.0000005,
|
||||
"litellm_provider": "xai",
|
||||
"input_cost_per_token": 0.000005,
|
||||
"output_cost_per_token": 0.000025,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-mini-fast-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.0000006,
|
||||
"output_cost_per_token": 0.000004,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/google/gemini-2.0-flash-exp:free": {
|
||||
@@ -569,4 +403,66 @@
|
||||
"supports_audio_output": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.00000125,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_reasoning": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
|
||||
},
|
||||
"gemini/gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.0000007,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"rpm": 10000,
|
||||
"tpm": 10000000,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
|
||||
},
|
||||
"together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": {
|
||||
"input_cost_per_token": 0.0000002,
|
||||
"output_cost_per_token": 0.0000006,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -958,6 +958,7 @@
|
||||
use_system_prompt: false
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-03-25
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: gemini/gemini-2.0-flash
|
||||
@@ -965,24 +966,28 @@
|
||||
- name: gemini/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: gemini/gemini-2.0-flash
|
||||
overeager: true
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
overeager: true
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
# Need metadata for this one...
|
||||
#weak_model_name: vertex_ai/gemini-2.0-flash
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
# Need metadata for this one...
|
||||
#weak_model_name: vertex_ai/gemini-2.0-flash
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/openrouter/quasar-alpha
|
||||
use_repo_map: true
|
||||
@@ -1006,17 +1011,428 @@
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: xai/grok-3-mini-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- reasoning_effort
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: low
|
||||
|
||||
- name: openrouter/x-ai/grok-3-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: xai/grok-3-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: openrouter/x-ai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: xai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openrouter/optimus-alpha
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys # user: 52.x%/96.9%
|
||||
examples_as_sys_msg: false # true: 51.6% correct, 95.6% well formed; false: 52.4%/98.2%
|
||||
editor_model_name: gpt-4.1-mini
|
||||
|
||||
- name: openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: openai/gpt-4.1-mini
|
||||
|
||||
- name: azure/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: azure/gpt-4.1-mini
|
||||
|
||||
- name: openrouter/openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1-mini
|
||||
|
||||
- name: gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false # false: 32.x%/92.4% (60+ malformed responses); true: 31.7/90.2/60+
|
||||
|
||||
- name: openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: azure/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: openrouter/openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: gemini/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-03-25
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-05-06
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-05-06
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
|
||||
#- name: openrouter/qwen/qwen3-235b-a22b
|
||||
# system_prompt_prefix: "/no_think"
|
||||
# use_temperature: 0.7
|
||||
# extra_params:
|
||||
# max_tokens: 24000
|
||||
# top_p: 0.8
|
||||
# top_k: 20
|
||||
# min_p: 0.0
|
||||
# temperature: 0.7
|
||||
# extra_body:
|
||||
# provider:
|
||||
# order: ["Together"]
|
||||
|
||||
#- name: together_ai/Qwen/Qwen3-235B-A22B-fp8-tput
|
||||
# system_prompt_prefix: "/no_think"
|
||||
# use_temperature: 0.7
|
||||
# reasoning_tag: think
|
||||
# extra_params:
|
||||
# max_tokens: 24000
|
||||
# top_p: 0.8
|
||||
# top_k: 20
|
||||
# min_p: 0.0
|
||||
# temperature: 0.7
|
||||
|
||||
@@ -14,7 +14,7 @@ aider_user_agent = f"Aider/{__version__} +{urls.website}"
|
||||
# platforms.
|
||||
|
||||
|
||||
def install_playwright(io):
|
||||
def check_env():
|
||||
try:
|
||||
from playwright.sync_api import sync_playwright
|
||||
|
||||
@@ -29,6 +29,16 @@ def install_playwright(io):
|
||||
except Exception:
|
||||
has_chromium = False
|
||||
|
||||
return has_pip, has_chromium
|
||||
|
||||
|
||||
def has_playwright():
|
||||
has_pip, has_chromium = check_env()
|
||||
return has_pip and has_chromium
|
||||
|
||||
|
||||
def install_playwright(io):
|
||||
has_pip, has_chromium = check_env()
|
||||
if has_pip and has_chromium:
|
||||
return True
|
||||
|
||||
@@ -262,7 +272,7 @@ def slimdown_html(soup):
|
||||
|
||||
|
||||
def main(url):
|
||||
scraper = Scraper()
|
||||
scraper = Scraper(playwright_available=has_playwright())
|
||||
content = scraper.scrape(url)
|
||||
print(content)
|
||||
|
||||
|
||||
178
aider/utils.py
178
aider/utils.py
@@ -1,13 +1,14 @@
|
||||
import itertools
|
||||
import os
|
||||
import platform
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import oslex
|
||||
from rich.console import Console
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
|
||||
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}
|
||||
@@ -251,52 +252,151 @@ def run_install(cmd):
|
||||
|
||||
|
||||
class Spinner:
|
||||
unicode_spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
|
||||
ascii_spinner = ["|", "/", "-", "\\"]
|
||||
"""
|
||||
Minimal spinner that scans a single marker back and forth across a line.
|
||||
|
||||
def __init__(self, text):
|
||||
The animation is pre-rendered into a list of frames. If the terminal
|
||||
cannot display unicode the frames are converted to plain ASCII.
|
||||
"""
|
||||
|
||||
last_frame_idx = 0 # Class variable to store the last frame index
|
||||
|
||||
def __init__(self, text: str, width: int = 7):
|
||||
self.text = text
|
||||
self.start_time = time.time()
|
||||
self.last_update = 0
|
||||
self.last_update = 0.0
|
||||
self.visible = False
|
||||
self.is_tty = sys.stdout.isatty()
|
||||
self.tested = False
|
||||
self.console = Console()
|
||||
|
||||
def test_charset(self):
|
||||
if self.tested:
|
||||
return
|
||||
self.tested = True
|
||||
# Try unicode first, fall back to ascii if needed
|
||||
# Pre-render the animation frames using pure ASCII so they will
|
||||
# always display, even on very limited terminals.
|
||||
ascii_frames = [
|
||||
"#= ", # C1 C2 space(8)
|
||||
"=# ", # C2 C1 space(8)
|
||||
" =# ", # space(1) C2 C1 space(7)
|
||||
" =# ", # space(2) C2 C1 space(6)
|
||||
" =# ", # space(3) C2 C1 space(5)
|
||||
" =# ", # space(4) C2 C1 space(4)
|
||||
" =# ", # space(5) C2 C1 space(3)
|
||||
" =# ", # space(6) C2 C1 space(2)
|
||||
" =# ", # space(7) C2 C1 space(1)
|
||||
" =#", # space(8) C2 C1
|
||||
" #=", # space(8) C1 C2
|
||||
" #= ", # space(7) C1 C2 space(1)
|
||||
" #= ", # space(6) C1 C2 space(2)
|
||||
" #= ", # space(5) C1 C2 space(3)
|
||||
" #= ", # space(4) C1 C2 space(4)
|
||||
" #= ", # space(3) C1 C2 space(5)
|
||||
" #= ", # space(2) C1 C2 space(6)
|
||||
" #= ", # space(1) C1 C2 space(7)
|
||||
]
|
||||
|
||||
self.unicode_palette = "░█"
|
||||
xlate_from, xlate_to = ("=#", self.unicode_palette)
|
||||
|
||||
# If unicode is supported, swap the ASCII chars for nicer glyphs.
|
||||
if self._supports_unicode():
|
||||
translation_table = str.maketrans(xlate_from, xlate_to)
|
||||
frames = [f.translate(translation_table) for f in ascii_frames]
|
||||
self.scan_char = xlate_to[xlate_from.find("#")]
|
||||
else:
|
||||
frames = ascii_frames
|
||||
self.scan_char = "#"
|
||||
|
||||
# Bounce the scanner back and forth.
|
||||
self.frames = frames
|
||||
self.frame_idx = Spinner.last_frame_idx # Initialize from class variable
|
||||
self.width = len(frames[0]) - 2 # number of chars between the brackets
|
||||
self.animation_len = len(frames[0])
|
||||
self.last_display_len = 0 # Length of the last spinner line (frame + text)
|
||||
|
||||
def _supports_unicode(self) -> bool:
|
||||
if not self.is_tty:
|
||||
return False
|
||||
try:
|
||||
# Test if we can print unicode characters
|
||||
print(self.unicode_spinner[0], end="", flush=True)
|
||||
print("\r", end="", flush=True)
|
||||
self.spinner_chars = itertools.cycle(self.unicode_spinner)
|
||||
out = self.unicode_palette
|
||||
out += "\b" * len(self.unicode_palette)
|
||||
out += " " * len(self.unicode_palette)
|
||||
out += "\b" * len(self.unicode_palette)
|
||||
sys.stdout.write(out)
|
||||
sys.stdout.flush()
|
||||
return True
|
||||
except UnicodeEncodeError:
|
||||
self.spinner_chars = itertools.cycle(self.ascii_spinner)
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def _next_frame(self) -> str:
|
||||
frame = self.frames[self.frame_idx]
|
||||
self.frame_idx = (self.frame_idx + 1) % len(self.frames)
|
||||
Spinner.last_frame_idx = self.frame_idx # Update class variable
|
||||
return frame
|
||||
|
||||
def step(self, text: str = None) -> None:
|
||||
if text is not None:
|
||||
self.text = text
|
||||
|
||||
def step(self):
|
||||
if not self.is_tty:
|
||||
return
|
||||
|
||||
current_time = time.time()
|
||||
if not self.visible and current_time - self.start_time >= 0.5:
|
||||
now = time.time()
|
||||
if not self.visible and now - self.start_time >= 0.5:
|
||||
self.visible = True
|
||||
self._step()
|
||||
elif self.visible and current_time - self.last_update >= 0.1:
|
||||
self._step()
|
||||
self.last_update = current_time
|
||||
self.last_update = 0.0
|
||||
if self.is_tty:
|
||||
self.console.show_cursor(False)
|
||||
|
||||
def _step(self):
|
||||
if not self.visible:
|
||||
if not self.visible or now - self.last_update < 0.1:
|
||||
return
|
||||
|
||||
self.test_charset()
|
||||
print(f"\r{self.text} {next(self.spinner_chars)}\r{self.text} ", end="", flush=True)
|
||||
self.last_update = now
|
||||
frame_str = self._next_frame()
|
||||
|
||||
def end(self):
|
||||
# Determine the maximum width for the spinner line
|
||||
# Subtract 2 as requested, to leave a margin or prevent cursor wrapping issues
|
||||
max_spinner_width = self.console.width - 2
|
||||
if max_spinner_width < 0: # Handle extremely narrow terminals
|
||||
max_spinner_width = 0
|
||||
|
||||
current_text_payload = f" {self.text}"
|
||||
line_to_display = f"{frame_str}{current_text_payload}"
|
||||
|
||||
# Truncate the line if it's too long for the console width
|
||||
if len(line_to_display) > max_spinner_width:
|
||||
line_to_display = line_to_display[:max_spinner_width]
|
||||
|
||||
len_line_to_display = len(line_to_display)
|
||||
|
||||
# Calculate padding to clear any remnants from a longer previous line
|
||||
padding_to_clear = " " * max(0, self.last_display_len - len_line_to_display)
|
||||
|
||||
# Write the spinner frame, text, and any necessary clearing spaces
|
||||
sys.stdout.write(f"\r{line_to_display}{padding_to_clear}")
|
||||
self.last_display_len = len_line_to_display
|
||||
|
||||
# Calculate number of backspaces to position cursor at the scanner character
|
||||
scan_char_abs_pos = frame_str.find(self.scan_char)
|
||||
|
||||
# Total characters written to the line (frame + text + padding)
|
||||
total_chars_written_on_line = len_line_to_display + len(padding_to_clear)
|
||||
|
||||
# num_backspaces will be non-positive if scan_char_abs_pos is beyond
|
||||
# total_chars_written_on_line (e.g., if the scan char itself was truncated).
|
||||
# (e.g., if the scan char itself was truncated).
|
||||
# In such cases, (effectively) 0 backspaces are written,
|
||||
# and the cursor stays at the end of the line.
|
||||
num_backspaces = total_chars_written_on_line - scan_char_abs_pos
|
||||
sys.stdout.write("\b" * num_backspaces)
|
||||
sys.stdout.flush()
|
||||
|
||||
def end(self) -> None:
|
||||
if self.visible and self.is_tty:
|
||||
print("\r" + " " * (len(self.text) + 3))
|
||||
clear_len = self.last_display_len # Use the length of the last displayed content
|
||||
sys.stdout.write("\r" + " " * clear_len + "\r")
|
||||
sys.stdout.flush()
|
||||
self.console.show_cursor(True)
|
||||
self.visible = False
|
||||
|
||||
|
||||
def find_common_root(abs_fnames):
|
||||
@@ -384,18 +484,20 @@ def printable_shell_command(cmd_list):
|
||||
Returns:
|
||||
str: Shell-escaped command string.
|
||||
"""
|
||||
if platform.system() == "Windows":
|
||||
return subprocess.list2cmdline(cmd_list)
|
||||
else:
|
||||
return shlex.join(cmd_list)
|
||||
return oslex.join(cmd_list)
|
||||
|
||||
|
||||
def main():
|
||||
spinner = Spinner("Running spinner...")
|
||||
for _ in range(40): # 40 steps * 0.25 seconds = 10 seconds
|
||||
time.sleep(0.25)
|
||||
spinner.step()
|
||||
spinner.end()
|
||||
try:
|
||||
for _ in range(100):
|
||||
time.sleep(0.15)
|
||||
spinner.step()
|
||||
print("Success!")
|
||||
except KeyboardInterrupt:
|
||||
print("\nInterrupted by user.")
|
||||
finally:
|
||||
spinner.end()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
55
aider/waiting.py
Normal file
55
aider/waiting.py
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Thread-based, killable spinner utility.
|
||||
|
||||
Use it like:
|
||||
|
||||
from aider.waiting import WaitingSpinner
|
||||
|
||||
spinner = WaitingSpinner("Waiting for LLM")
|
||||
spinner.start()
|
||||
... # long task
|
||||
spinner.stop()
|
||||
"""
|
||||
|
||||
import threading
|
||||
import time
|
||||
|
||||
from aider.utils import Spinner
|
||||
|
||||
|
||||
class WaitingSpinner:
|
||||
"""Background spinner that can be started/stopped safely."""
|
||||
|
||||
def __init__(self, text: str = "Waiting for LLM", delay: float = 0.15):
|
||||
self.spinner = Spinner(text)
|
||||
self.delay = delay
|
||||
self._stop_event = threading.Event()
|
||||
self._thread = threading.Thread(target=self._spin, daemon=True)
|
||||
|
||||
def _spin(self):
|
||||
while not self._stop_event.is_set():
|
||||
time.sleep(self.delay)
|
||||
self.spinner.step()
|
||||
self.spinner.end()
|
||||
|
||||
def start(self):
|
||||
"""Start the spinner in a background thread."""
|
||||
if not self._thread.is_alive():
|
||||
self._thread.start()
|
||||
|
||||
def stop(self):
|
||||
"""Request the spinner to stop and wait briefly for the thread to exit."""
|
||||
self._stop_event.set()
|
||||
if self._thread.is_alive():
|
||||
self._thread.join(timeout=0.1)
|
||||
self.spinner.end()
|
||||
|
||||
# Allow use as a context-manager
|
||||
def __enter__(self):
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.stop()
|
||||
@@ -34,6 +34,8 @@ def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:
|
||||
"__pycache__/", # Python cache dir
|
||||
".DS_Store", # macOS metadata
|
||||
"Thumbs.db", # Windows thumbnail cache
|
||||
"*.svg",
|
||||
"*.pdf",
|
||||
# IDE files
|
||||
".idea/", # JetBrains IDEs
|
||||
".vscode/", # VS Code
|
||||
@@ -64,7 +66,9 @@ class FileWatcher:
|
||||
"""Watches source files for changes and AI comments"""
|
||||
|
||||
# Compiled regex pattern for AI comments
|
||||
ai_comment_pattern = re.compile(r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
|
||||
ai_comment_pattern = re.compile(
|
||||
r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE
|
||||
)
|
||||
|
||||
def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None):
|
||||
self.coder = coder
|
||||
@@ -93,15 +97,19 @@ class FileWatcher:
|
||||
|
||||
rel_path = path_abs.relative_to(self.root)
|
||||
if self.verbose:
|
||||
dump(rel_path)
|
||||
print("Changed", rel_path)
|
||||
|
||||
if self.gitignore_spec and self.gitignore_spec.match_file(
|
||||
rel_path.as_posix() + ("/" if path_abs.is_dir() else "")
|
||||
):
|
||||
return False
|
||||
|
||||
# Check file size before reading content
|
||||
if path_abs.is_file() and path_abs.stat().st_size > 1 * 1024 * 1024: # 1MB limit
|
||||
return False
|
||||
|
||||
if self.verbose:
|
||||
dump("ok", rel_path)
|
||||
print("Checking", rel_path)
|
||||
|
||||
# Check if file contains AI markers
|
||||
try:
|
||||
|
||||
@@ -26,9 +26,80 @@ cog.out(text)
|
||||
|
||||
### main branch
|
||||
|
||||
- Added support for `gemini-2.5-pro-preview-05-06` models.
|
||||
- Added support for `qwen3-235b` models.
|
||||
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
|
||||
- Added a spinner animation while waiting for the LLM to start streaming its response.
|
||||
- Updated the spinner animation to a Knight Rider style.
|
||||
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
|
||||
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
|
||||
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
|
||||
- Commit message prompt specifies the user's language.
|
||||
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
|
||||
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
|
||||
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
|
||||
- Added tracking of total tokens sent and received, now included in benchmark statistics.
|
||||
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
|
||||
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
|
||||
- Improved cost calculation using `litellm.completion_cost` where available.
|
||||
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
|
||||
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
|
||||
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
|
||||
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
|
||||
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
|
||||
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
|
||||
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
|
||||
- Improved display of filenames in the prompt header using rich Text formatting.
|
||||
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
|
||||
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
|
||||
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
|
||||
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
|
||||
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
|
||||
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
|
||||
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
|
||||
- Dropped support for Python 3.9.
|
||||
- Aider wrote 55% of the code in this release.
|
||||
|
||||
### Aider v0.82.3
|
||||
|
||||
- Add support for `gemini-2.5-flash-preview-04-17` models.
|
||||
- Improved robustness of edit block parsing when filenames start with backticks or fences.
|
||||
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
|
||||
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
|
||||
- Instruct models to reply in the user's detected system language.
|
||||
- Fix parsing of diffs for newly created files (`--- /dev/null`).
|
||||
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
|
||||
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
|
||||
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Skip scanning files larger than 1MB for AI comments (`--watch`).
|
||||
|
||||
### Aider v0.82.2
|
||||
|
||||
- Fix editing shell files with diff-fenced, by zjy1412.
|
||||
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
|
||||
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
|
||||
|
||||
### Aider v0.82.1
|
||||
|
||||
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
|
||||
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
|
||||
- Disabled streaming for `o3` models since you need identity verification to stream.
|
||||
- Fixed handling of file paths in unified diffs, especially those generated by git.
|
||||
|
||||
### Aider v0.82.0
|
||||
|
||||
- Support for GPT 4.1, mini and nano.
|
||||
- Added new `patch` edit format for OpenAI's GPT-4.1 model.
|
||||
- Improved support for using architect mode with Gemini 2.5 Pro.
|
||||
- Added new `editor-diff`, `editor-whole`, and `editor-diff-fenced` edit formats.
|
||||
- Bugfix for automatically selecting the best edit format to use in architect mode.
|
||||
- Added support for `grok-3-fast-beta` and `grok-3-mini-fast-beta` models.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.81.3
|
||||
|
||||
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
|
||||
- Updated default settings for Grok models.
|
||||
- Aider wrote 64% of the code in this release.
|
||||
|
||||
### Aider v0.81.2
|
||||
|
||||
@@ -40,14 +111,12 @@ cog.out(text)
|
||||
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
|
||||
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
|
||||
- Commit messages generated by aider are now lowercase, by Anton Ödman.
|
||||
- Aider wrote 64% of the code in this release.
|
||||
|
||||
### Aider v0.81.1
|
||||
|
||||
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
|
||||
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
|
||||
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
|
||||
- Aider wrote 87% of the code in this release.
|
||||
|
||||
### Aider v0.81.0
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ aux_links:
|
||||
"GitHub":
|
||||
- "https://github.com/Aider-AI/aider"
|
||||
"Discord":
|
||||
- "https://discord.gg/Tv2uQnR88V"
|
||||
- "https://discord.gg/Y7X7bhMQFV"
|
||||
"Blog":
|
||||
- "/blog/"
|
||||
|
||||
@@ -40,7 +40,7 @@ nav_external_links:
|
||||
- title: "GitHub"
|
||||
url: "https://github.com/Aider-AI/aider"
|
||||
- title: "Discord"
|
||||
url: "https://discord.gg/Tv2uQnR88V"
|
||||
url: "https://discord.gg/Y7X7bhMQFV"
|
||||
|
||||
repository: Aider-AI/aider
|
||||
|
||||
|
||||
@@ -4448,3 +4448,55 @@
|
||||
Paul Gauthier (aider): 225
|
||||
start_tag: v0.80.0
|
||||
total_lines: 263
|
||||
- aider_percentage: 91.85
|
||||
aider_total: 1567
|
||||
end_date: '2025-04-14'
|
||||
end_tag: v0.82.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args_formatter.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/coders/__init__.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 5
|
||||
aider/coders/editor_diff_fenced_coder.py:
|
||||
Paul Gauthier (aider): 9
|
||||
aider/coders/patch_coder.py:
|
||||
Paul Gauthier (aider): 679
|
||||
aider/coders/search_replace.py:
|
||||
Paul Gauthier (aider): 1
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 1
|
||||
aider/models.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 25
|
||||
aider/resources/model-settings.yml:
|
||||
Felix Lisczyk: 13
|
||||
Paul Gauthier: 37
|
||||
Paul Gauthier (aider): 68
|
||||
aider/website/_includes/leaderboard.js:
|
||||
Paul Gauthier: 38
|
||||
Paul Gauthier (aider): 6
|
||||
aider/website/_includes/leaderboard_table.js:
|
||||
Paul Gauthier (aider): 518
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 15
|
||||
Paul Gauthier (aider): 209
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 28
|
||||
scripts/homepage.py:
|
||||
Paul Gauthier (aider): 2
|
||||
scripts/versionbump.py:
|
||||
Paul Gauthier (aider): 11
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 25
|
||||
grand_total:
|
||||
Felix Lisczyk: 13
|
||||
Paul Gauthier: 126
|
||||
Paul Gauthier (aider): 1567
|
||||
start_tag: v0.81.0
|
||||
total_lines: 1706
|
||||
|
||||
@@ -643,7 +643,7 @@
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: "aider --model anthropic/claude-3-7-sonnet-20250219 # plus yml config"
|
||||
command: "aider --model anthropic/claude-3-7-sonnet-20250219 --thinking-tokens 32k"
|
||||
date: 2025-02-24
|
||||
versions: 0.75.1.dev
|
||||
seconds_per_case: 105.2
|
||||
@@ -831,7 +831,7 @@
|
||||
date: 2025-04-12
|
||||
versions: 0.81.3.dev
|
||||
seconds_per_case: 45.3
|
||||
total_cost: 6.3174
|
||||
total_cost: 0 # incorrect: 6.3174
|
||||
|
||||
- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
|
||||
test_cases: 225
|
||||
@@ -1013,4 +1013,296 @@
|
||||
date: 2025-04-10
|
||||
versions: 0.81.2.dev
|
||||
seconds_per_case: 18.4
|
||||
total_cost: 0.0000
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-14-21-05-54--gpt41-diff-exuser
|
||||
test_cases: 225
|
||||
model: gpt-4.1
|
||||
edit_format: diff
|
||||
commit_hash: 7a87db5-dirty
|
||||
pass_rate_1: 20.0
|
||||
pass_rate_2: 52.4
|
||||
pass_num_1: 45
|
||||
pass_num_2: 118
|
||||
percent_cases_well_formed: 98.2
|
||||
error_outputs: 6
|
||||
num_malformed_responses: 5
|
||||
num_with_malformed_responses: 4
|
||||
user_asks: 171
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 20.5
|
||||
total_cost: 9.8556
|
||||
|
||||
- dirname: 2025-04-14-21-27-53--gpt41mini-diff
|
||||
test_cases: 225
|
||||
model: gpt-4.1-mini
|
||||
edit_format: diff
|
||||
commit_hash: ffb743e-dirty
|
||||
pass_rate_1: 11.1
|
||||
pass_rate_2: 32.4
|
||||
pass_num_1: 25
|
||||
pass_num_2: 73
|
||||
percent_cases_well_formed: 92.4
|
||||
error_outputs: 64
|
||||
num_malformed_responses: 62
|
||||
num_with_malformed_responses: 17
|
||||
user_asks: 159
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 2
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1-mini
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 19.5
|
||||
total_cost: 1.9918
|
||||
|
||||
- dirname: 2025-04-14-22-46-01--gpt41nano-diff
|
||||
test_cases: 225
|
||||
model: gpt-4.1-nano
|
||||
edit_format: whole
|
||||
commit_hash: 71d1591-dirty
|
||||
pass_rate_1: 3.1
|
||||
pass_rate_2: 8.9
|
||||
pass_num_1: 7
|
||||
pass_num_2: 20
|
||||
percent_cases_well_formed: 94.2
|
||||
error_outputs: 20
|
||||
num_malformed_responses: 20
|
||||
num_with_malformed_responses: 13
|
||||
user_asks: 316
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 8
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1-nano
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 12.0
|
||||
total_cost: 0.4281
|
||||
|
||||
- dirname: 2025-04-16-21-20-55--o3-high-diff-temp0-exsys
|
||||
test_cases: 225
|
||||
model: o3 (high)
|
||||
edit_format: diff
|
||||
commit_hash: 24805ff-dirty
|
||||
pass_rate_1: 36.9
|
||||
pass_rate_2: 79.6
|
||||
pass_num_1: 83
|
||||
pass_num_2: 179
|
||||
percent_cases_well_formed: 95.1
|
||||
error_outputs: 11
|
||||
num_malformed_responses: 11
|
||||
num_with_malformed_responses: 11
|
||||
user_asks: 110
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model o3
|
||||
date: 2025-04-16
|
||||
versions: 0.82.1.dev
|
||||
seconds_per_case: 113.8
|
||||
total_cost: 111.0325
|
||||
|
||||
- dirname: 2025-04-16-22-01-58--o4-mini-high-diff-exsys
|
||||
test_cases: 225
|
||||
model: o4-mini (high)
|
||||
edit_format: diff
|
||||
commit_hash: b66901f-dirty
|
||||
pass_rate_1: 19.6
|
||||
pass_rate_2: 72.0
|
||||
pass_num_1: 44
|
||||
pass_num_2: 162
|
||||
percent_cases_well_formed: 90.7
|
||||
error_outputs: 26
|
||||
num_malformed_responses: 24
|
||||
num_with_malformed_responses: 21
|
||||
user_asks: 66
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model o4-mini
|
||||
date: 2025-04-16
|
||||
versions: 0.82.1.dev
|
||||
seconds_per_case: 176.5
|
||||
total_cost: 19.6399
|
||||
|
||||
- dirname: 2025-04-17-01-20-35--o3-mini-high-diff-arch
|
||||
test_cases: 225
|
||||
model: o3 (high) + gpt-4.1
|
||||
edit_format: architect
|
||||
commit_hash: 80909e1-dirty
|
||||
editor_model: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
pass_rate_1: 36.0
|
||||
pass_rate_2: 82.7
|
||||
pass_num_1: 81
|
||||
pass_num_2: 186
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 9
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model o3 --architect
|
||||
date: 2025-04-17
|
||||
versions: 0.82.2.dev
|
||||
seconds_per_case: 110.0
|
||||
total_cost: 69.2921
|
||||
|
||||
- dirname: 2025-04-19-14-43-04--o4-mini-patch
|
||||
test_cases: 225
|
||||
model: openhands-lm-32b-v0.1
|
||||
edit_format: whole
|
||||
commit_hash: c08336f
|
||||
pass_rate_1: 4.0
|
||||
pass_rate_2: 10.2
|
||||
pass_num_1: 9
|
||||
pass_num_2: 23
|
||||
percent_cases_well_formed: 95.1
|
||||
error_outputs: 55
|
||||
num_malformed_responses: 41
|
||||
num_with_malformed_responses: 11
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 11
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/all-hands/openhands-lm-32b-v0.1
|
||||
date: 2025-04-19
|
||||
versions: 0.82.2.dev
|
||||
seconds_per_case: 195.6
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-20-19-54-31--flash25-diff-no-think
|
||||
test_cases: 225
|
||||
model: gemini-2.5-flash-preview-04-17 (default)
|
||||
edit_format: diff
|
||||
commit_hash: 7fcce5d-dirty
|
||||
pass_rate_1: 21.8
|
||||
pass_rate_2: 47.1
|
||||
pass_num_1: 49
|
||||
pass_num_2: 106
|
||||
percent_cases_well_formed: 85.3
|
||||
error_outputs: 60
|
||||
num_malformed_responses: 55
|
||||
num_with_malformed_responses: 33
|
||||
user_asks: 82
|
||||
lazy_comments: 1
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 5
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model gemini/gemini-2.5-flash-preview-04-17
|
||||
date: 2025-04-20
|
||||
versions: 0.82.3.dev
|
||||
seconds_per_case: 50.1
|
||||
total_cost: 1.8451
|
||||
|
||||
- dirname: 2025-05-07-19-32-40--gemini0506-diff-fenced-completion_cost
|
||||
test_cases: 225
|
||||
model: Gemini 2.5 Pro Preview 05-06
|
||||
edit_format: diff-fenced
|
||||
commit_hash: 3b08327-dirty
|
||||
pass_rate_1: 36.4
|
||||
pass_rate_2: 76.9
|
||||
pass_num_1: 82
|
||||
pass_num_2: 173
|
||||
percent_cases_well_formed: 97.3
|
||||
error_outputs: 15
|
||||
num_malformed_responses: 7
|
||||
num_with_malformed_responses: 6
|
||||
user_asks: 105
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model gemini/gemini-2.5-pro-preview-05-06
|
||||
date: 2025-05-07
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 165.3
|
||||
total_cost: 37.4104
|
||||
|
||||
- dirname: 2025-05-08-03-20-24--qwen3-32b-default
|
||||
test_cases: 225
|
||||
model: Qwen3 32B
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty, aeaf259
|
||||
pass_rate_1: 14.2
|
||||
pass_rate_2: 40.0
|
||||
pass_num_1: 32
|
||||
pass_num_2: 90
|
||||
percent_cases_well_formed: 83.6
|
||||
error_outputs: 119
|
||||
num_malformed_responses: 50
|
||||
num_with_malformed_responses: 37
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 12
|
||||
prompt_tokens: 317591
|
||||
completion_tokens: 120418
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-32b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 372.2
|
||||
total_cost: 0.7603
|
||||
|
||||
- dirname: 2025-05-08-03-22-37--qwen3-235b-defaults
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty
|
||||
pass_rate_1: 17.3
|
||||
pass_rate_2: 49.8
|
||||
pass_num_1: 39
|
||||
pass_num_2: 112
|
||||
percent_cases_well_formed: 91.6
|
||||
error_outputs: 58
|
||||
num_malformed_responses: 29
|
||||
num_with_malformed_responses: 19
|
||||
user_asks: 102
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 0
|
||||
completion_tokens: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 428.1
|
||||
total_cost: 1.8037
|
||||
215
aider/website/_data/qwen3_leaderboard.yml
Normal file
215
aider/website/_data/qwen3_leaderboard.yml
Normal file
@@ -0,0 +1,215 @@
|
||||
- dirname: 2025-05-08-03-20-24--qwen3-32b-default
|
||||
test_cases: 225
|
||||
model: Qwen3 32B diff on OpenRouter, all providers, default settings (thinking)
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty, aeaf259
|
||||
pass_rate_1: 14.2
|
||||
pass_rate_2: 40.0
|
||||
pass_num_1: 32
|
||||
pass_num_2: 90
|
||||
percent_cases_well_formed: 83.6
|
||||
error_outputs: 119
|
||||
num_malformed_responses: 50
|
||||
num_with_malformed_responses: 37
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 12
|
||||
prompt_tokens: 317591
|
||||
completion_tokens: 120418
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-32b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 372.2
|
||||
total_cost: 0.7603
|
||||
|
||||
- dirname: 2025-05-08-03-22-37--qwen3-235b-defaults
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff on OpenRouter, all providers, default settings (thinking)
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty
|
||||
pass_rate_1: 17.3
|
||||
pass_rate_2: 49.8
|
||||
pass_num_1: 39
|
||||
pass_num_2: 112
|
||||
percent_cases_well_formed: 91.6
|
||||
error_outputs: 58
|
||||
num_malformed_responses: 29
|
||||
num_with_malformed_responses: 19
|
||||
user_asks: 102
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 0
|
||||
completion_tokens: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 428.1
|
||||
total_cost: 1.8037
|
||||
|
||||
|
||||
- dirname: 2025-05-08-17-39-14--qwen3-235b-or-together-only
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff on OpenRouter only TogetherAI, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 328584e
|
||||
pass_rate_1: 28.0
|
||||
pass_rate_2: 54.7
|
||||
pass_num_1: 63
|
||||
pass_num_2: 123
|
||||
percent_cases_well_formed: 90.7
|
||||
error_outputs: 39
|
||||
num_malformed_responses: 32
|
||||
num_with_malformed_responses: 21
|
||||
user_asks: 106
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 2816606
|
||||
completion_tokens: 362346
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 77.2
|
||||
total_cost: 0.6399
|
||||
|
||||
|
||||
- dirname: 2025-04-30-04-49-37--Qwen3-235B-A22B-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B whole with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 28.0
|
||||
pass_rate_2: 65.3
|
||||
pass_num_1: 63
|
||||
pass_num_2: 147
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 166.0
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-49-50--Qwen3-235B-A22B-diff-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B diff with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 29.8
|
||||
pass_rate_2: 61.3
|
||||
pass_num_1: 67
|
||||
pass_num_2: 138
|
||||
percent_cases_well_formed: 94.7
|
||||
error_outputs: 25
|
||||
num_malformed_responses: 25
|
||||
num_with_malformed_responses: 12
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 158.2
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-08-41--Qwen3-32B-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-32B whole with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 20.4
|
||||
pass_rate_2: 45.8
|
||||
pass_num_1: 46
|
||||
pass_num_2: 103
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 94
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-32B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 48.1
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-08-51--Qwen3-32B-diff-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-32B diff with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 20.4
|
||||
pass_rate_2: 41.3
|
||||
pass_num_1: 46
|
||||
pass_num_2: 93
|
||||
percent_cases_well_formed: 94.2
|
||||
error_outputs: 17
|
||||
num_malformed_responses: 14
|
||||
num_with_malformed_responses: 13
|
||||
user_asks: 83
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-32B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 59.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-05-07-03-15-59--Qwen3-235B-A22B-Q5_K_M-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B whole with llama.cpp, Q5_K_M (unsloth), recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 8159cbf
|
||||
pass_rate_1: 27.1
|
||||
pass_rate_2: 59.1
|
||||
pass_num_1: 61
|
||||
pass_num_2: 133
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 1
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 169
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B-Q5_K_M
|
||||
date: 2025-05-07
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 635.2
|
||||
total_cost: 0.0000
|
||||
@@ -27,7 +27,7 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
labels: labels,
|
||||
datasets: [{
|
||||
label: 'Aider\'s percent of new code by release',
|
||||
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }} },{% endfor %}],
|
||||
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }}, end_date: '{{ row.end_date }}' },{% endfor %}],
|
||||
backgroundColor: 'rgba(54, 162, 235, 0.8)',
|
||||
borderColor: 'rgba(54, 162, 235, 1)',
|
||||
borderWidth: 1
|
||||
@@ -88,6 +88,10 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
var value = context.parsed.y || 0;
|
||||
var lines = context.raw.lines || 0;
|
||||
return `${label}: ${Math.round(value)}% (${lines} lines)`;
|
||||
},
|
||||
afterLabel: function(context) {
|
||||
let date = context.raw.end_date || 'n/a';
|
||||
return `Date: ` + date;
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
|
||||
If you already have python 3.8-3.13 installed, you can get started quickly like this:
|
||||
If you already have python 3.8-3.13 installed, you can get started quickly like this.
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Start working with aider on your codebase:
|
||||
|
||||
```bash
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ If you need more help, please check our
|
||||
[GitHub issues](https://github.com/Aider-AI/aider/issues)
|
||||
and file a new issue if your problem isn't discussed.
|
||||
Or drop into our
|
||||
[Discord](https://discord.gg/Tv2uQnR88V)
|
||||
[Discord](https://discord.gg/Y7X7bhMQFV)
|
||||
to chat with us.
|
||||
|
||||
When reporting problems, it is very helpful if you can provide:
|
||||
|
||||
5
aider/website/_includes/install.md
Normal file
5
aider/website/_includes/install.md
Normal file
@@ -0,0 +1,5 @@
|
||||
|
||||
```bash
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
```
|
||||
520
aider/website/_includes/leaderboard_table.js
Normal file
520
aider/website/_includes/leaderboard_table.js
Normal file
@@ -0,0 +1,520 @@
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
let currentMode = 'view'; // 'view', 'select', 'detail'
|
||||
let selectedRows = new Set(); // Store indices of selected rows
|
||||
const MAX_DISPLAY_COST_CAP = 75; // Define the constant here
|
||||
|
||||
const allMainRows = document.querySelectorAll('tr[id^="main-row-"]');
|
||||
const allDetailsRows = document.querySelectorAll('tr[id^="details-"]');
|
||||
const searchInput = document.getElementById('editSearchInput');
|
||||
const modeViewButton = document.getElementById('mode-view-btn');
|
||||
const modeDetailButton = document.getElementById('mode-detail-btn');
|
||||
const modeSelectButton = document.getElementById('mode-select-btn');
|
||||
const modeButtons = [modeViewButton, modeSelectButton, modeDetailButton];
|
||||
const selectAllCheckbox = document.getElementById('select-all-checkbox');
|
||||
const leaderboardTitle = document.getElementById('leaderboard-title'); // Get title element
|
||||
const defaultTitle = "Aider polyglot coding leaderboard";
|
||||
const filteredTitle = "Aider polyglot coding benchmark results (selected)";
|
||||
|
||||
function applySearchFilter() {
|
||||
const searchTerm = searchInput.value.toLowerCase();
|
||||
allMainRows.forEach(row => {
|
||||
const textContent = row.textContent.toLowerCase();
|
||||
const detailsRow = document.getElementById(row.id.replace('main-row-', 'details-'));
|
||||
const matchesSearch = textContent.includes(searchTerm);
|
||||
|
||||
if (matchesSearch) {
|
||||
row.classList.remove('hidden-by-search');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-search');
|
||||
} else {
|
||||
row.classList.add('hidden-by-search');
|
||||
if (detailsRow) detailsRow.classList.add('hidden-by-search');
|
||||
}
|
||||
});
|
||||
// After applying search filter, re-apply view mode filter and update select-all state
|
||||
updateTableView(currentMode);
|
||||
if (currentMode === 'select') {
|
||||
updateSelectAllCheckboxState();
|
||||
}
|
||||
|
||||
// Update cost bars and ticks since visible rows may have changed
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
|
||||
function getVisibleMainRows() {
|
||||
// Helper to get rows currently visible (not hidden by search or mode)
|
||||
return Array.from(allMainRows).filter(row =>
|
||||
!row.classList.contains('hidden-by-search') && !row.classList.contains('hidden-by-mode')
|
||||
);
|
||||
}
|
||||
|
||||
function updateSelectAllCheckboxState() {
|
||||
// Update the header checkbox based on the selection state of *visible* rows
|
||||
if (currentMode !== 'select') return; // Only relevant in select mode
|
||||
|
||||
const visibleRows = getVisibleMainRows();
|
||||
const visibleRowCount = visibleRows.length;
|
||||
const selectedVisibleRowCount = visibleRows.filter(row => selectedRows.has(row.querySelector('.row-selector')?.dataset.rowIndex)).length;
|
||||
|
||||
if (visibleRowCount === 0) {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
} else if (selectedVisibleRowCount === visibleRowCount) {
|
||||
selectAllCheckbox.checked = true;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
} else if (selectedVisibleRowCount > 0) {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = true;
|
||||
} else {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function updateTableView(mode) {
|
||||
currentMode = mode; // Update global state ('view', 'select', 'detail')
|
||||
|
||||
// Update button styles first
|
||||
modeButtons.forEach(btn => {
|
||||
btn.classList.remove('active');
|
||||
// Reset specific styles potentially added by .active
|
||||
btn.style.backgroundColor = '';
|
||||
btn.style.color = '';
|
||||
});
|
||||
let activeButton;
|
||||
if (mode === 'view') activeButton = modeViewButton;
|
||||
else if (mode === 'select') activeButton = modeSelectButton;
|
||||
else if (mode === 'detail') activeButton = modeDetailButton;
|
||||
|
||||
activeButton.classList.add('active');
|
||||
activeButton.style.backgroundColor = '#e7f3ff'; // Use selected row highlight blue
|
||||
activeButton.style.color = '#495057'; // Use dark text for contrast on light blue
|
||||
|
||||
// Get the first header cell (for the toggle/checkbox column)
|
||||
const firstHeaderCell = document.querySelector('table thead th:first-child');
|
||||
|
||||
// Show/hide header checkbox based on mode
|
||||
selectAllCheckbox.style.display = mode === 'select' ? 'inline-block' : 'none';
|
||||
|
||||
allMainRows.forEach(row => {
|
||||
const rowIndex = row.querySelector('.row-selector')?.dataset.rowIndex;
|
||||
const toggleButton = row.querySelector('.toggle-details');
|
||||
const selectorCheckbox = row.querySelector('.row-selector');
|
||||
const firstCell = row.querySelector('td:first-child'); // Get the first cell of the main row
|
||||
const detailsRow = document.getElementById(`details-${rowIndex}`);
|
||||
const isSelected = selectedRows.has(rowIndex);
|
||||
|
||||
// Reset visibility classes before applying mode logic
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
|
||||
// Show/hide the first column (header and data cells) based on mode
|
||||
if (firstHeaderCell) {
|
||||
firstHeaderCell.style.display = mode === 'view' ? 'none' : '';
|
||||
}
|
||||
if (firstCell) {
|
||||
firstCell.style.display = mode === 'view' ? 'none' : '';
|
||||
}
|
||||
|
||||
// Apply mode-specific logic
|
||||
if (mode === 'view') { // --- VIEW MODE ---
|
||||
toggleButton.style.display = 'none'; // Hide toggle in view mode
|
||||
selectorCheckbox.style.display = 'none';
|
||||
row.classList.remove('row-selected'); // Ensure no selection highlight
|
||||
// view-highlighted is handled by row click listener
|
||||
|
||||
// In 'view' mode, hide row if selections exist AND this row is NOT selected
|
||||
if (selectedRows.size > 0 && !isSelected) {
|
||||
row.classList.add('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.add('hidden-by-mode');
|
||||
} else {
|
||||
// Ensure row is not hidden by mode if it's selected or no selections exist
|
||||
// This is handled by the reset at the start of the loop:
|
||||
// row.classList.remove('hidden-by-mode');
|
||||
// if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
}
|
||||
// Always hide details row content in view mode regardless of visibility class
|
||||
if (detailsRow) {
|
||||
detailsRow.style.display = 'none';
|
||||
}
|
||||
|
||||
} else if (mode === 'select') { // --- SELECT MODE ---
|
||||
toggleButton.style.display = 'none';
|
||||
selectorCheckbox.style.display = 'inline-block';
|
||||
selectorCheckbox.checked = isSelected;
|
||||
row.classList.toggle('row-selected', isSelected);
|
||||
row.classList.remove('view-highlighted'); // Clear view highlight when switching to select
|
||||
// Always hide details row in select mode
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
|
||||
// In 'select' mode, no rows should be hidden based on selection status
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
|
||||
} else { // --- DETAIL MODE --- (mode === 'detail')
|
||||
toggleButton.style.display = 'inline-block'; // Show toggle
|
||||
selectorCheckbox.style.display = 'none';
|
||||
row.classList.remove('row-selected'); // Clear selection highlight
|
||||
row.classList.remove('view-highlighted'); // Clear view highlight when switching to detail
|
||||
// Details row visibility is controlled by the toggle button state, don't force hide/show here
|
||||
// Ensure main row is visible if not hidden by search
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) {
|
||||
detailsRow.classList.remove('hidden-by-mode');
|
||||
// Preserve existing display state (controlled by toggle) unless hidden by search
|
||||
if (detailsRow.classList.contains('hidden-by-search')) {
|
||||
detailsRow.style.display = 'none';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Ensure rows hidden by search remain hidden regardless of mode
|
||||
if (row.classList.contains('hidden-by-search')) {
|
||||
row.style.display = 'none';
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
} else if (!row.classList.contains('hidden-by-mode')) {
|
||||
// Make row visible if not hidden by search or mode
|
||||
row.style.display = ''; // Or 'table-row' if needed, but '' usually works
|
||||
} else {
|
||||
// Row is hidden by mode, ensure it's hidden
|
||||
row.style.display = 'none';
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
}
|
||||
|
||||
|
||||
});
|
||||
|
||||
// Update the leaderboard title based on mode and selection
|
||||
if (leaderboardTitle) {
|
||||
// Check if a custom title is provided globally
|
||||
if (typeof LEADERBOARD_CUSTOM_TITLE !== 'undefined' && LEADERBOARD_CUSTOM_TITLE) {
|
||||
leaderboardTitle.textContent = LEADERBOARD_CUSTOM_TITLE;
|
||||
} else {
|
||||
if (currentMode === 'view' && selectedRows.size > 0) {
|
||||
leaderboardTitle.textContent = filteredTitle;
|
||||
} else {
|
||||
leaderboardTitle.textContent = defaultTitle;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update the select-all checkbox state after updating the view
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks since visible/selected rows may have changed
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
|
||||
|
||||
// --- Existing Initializations ---
|
||||
// Add percentage ticks
|
||||
const percentCells = document.querySelectorAll('.bar-cell:not(.cost-bar-cell)');
|
||||
percentCells.forEach(cell => {
|
||||
// Add ticks at 0%, 10%, 20%, ..., 100%
|
||||
for (let i = 0; i <= 100; i += 10) {
|
||||
const tick = document.createElement('div');
|
||||
tick.className = 'percent-tick';
|
||||
tick.style.left = `${i}%`;
|
||||
cell.appendChild(tick);
|
||||
}
|
||||
});
|
||||
|
||||
// Function to calculate the appropriate max display cost based on visible/selected entries
|
||||
function calculateDisplayMaxCost() {
|
||||
// Get the appropriate set of rows based on the current mode and selection state
|
||||
let rowsToConsider;
|
||||
|
||||
if (currentMode === 'view' && selectedRows.size > 0) {
|
||||
// In view mode with selections, only consider selected rows
|
||||
rowsToConsider = Array.from(allMainRows).filter(row => {
|
||||
const rowIndex = row.querySelector('.row-selector')?.dataset.rowIndex;
|
||||
return rowIndex && selectedRows.has(rowIndex) && !row.classList.contains('hidden-by-search');
|
||||
});
|
||||
} else {
|
||||
// In other modes or without selections, consider all visible rows
|
||||
rowsToConsider = getVisibleMainRows();
|
||||
}
|
||||
|
||||
// Find the maximum cost among the rows to consider
|
||||
let maxCost = 0;
|
||||
rowsToConsider.forEach(row => {
|
||||
const costBar = row.querySelector('.cost-bar');
|
||||
if (costBar) {
|
||||
const cost = parseFloat(costBar.dataset.cost || '0');
|
||||
if (cost > maxCost) maxCost = cost;
|
||||
}
|
||||
});
|
||||
|
||||
// Cap at MAX_DISPLAY_COST_CAP if any entries exceed that amount, otherwise use actual max
|
||||
return maxCost > MAX_DISPLAY_COST_CAP ? MAX_DISPLAY_COST_CAP : Math.max(1, maxCost); // Ensure at least 1 to avoid division by zero
|
||||
}
|
||||
|
||||
// Process cost bars with dynamic scale
|
||||
function updateCostBars() {
|
||||
const costBars = document.querySelectorAll('.cost-bar');
|
||||
const currentMaxDisplayCost = calculateDisplayMaxCost();
|
||||
|
||||
// Remove existing special indicators first
|
||||
document.querySelectorAll('.dark-section, .tear-line').forEach(el => el.remove());
|
||||
|
||||
costBars.forEach(bar => {
|
||||
const cost = parseFloat(bar.dataset.cost);
|
||||
|
||||
if (cost > 0) {
|
||||
// Calculate percentage based on the dynamic display max
|
||||
const percent = Math.min(cost, currentMaxDisplayCost) / currentMaxDisplayCost * 100;
|
||||
// Clamp percentage between 0 and 100
|
||||
bar.style.width = Math.max(0, Math.min(100, percent)) + '%';
|
||||
|
||||
// Mark bars that exceed the limit (only if our display max is capped at 50)
|
||||
if (currentMaxDisplayCost === MAX_DISPLAY_COST_CAP && cost > MAX_DISPLAY_COST_CAP) {
|
||||
// Create a darker section at the end with diagonal stripes
|
||||
const darkSection = document.createElement('div');
|
||||
darkSection.className = 'bar-viz dark-section';
|
||||
darkSection.style.width = '15%'; // From 85% to 100%
|
||||
darkSection.style.left = '85%';
|
||||
darkSection.style.backgroundColor = 'rgba(13, 110, 253, 0.6)'; // Darker blue
|
||||
darkSection.style.borderRight = '1px solid rgba(13, 110, 253, 0.8)';
|
||||
darkSection.style.zIndex = '1';
|
||||
// Add diagonal stripes with CSS background
|
||||
darkSection.style.backgroundImage = 'repeating-linear-gradient(45deg, rgba(255,255,255,0.3), rgba(255,255,255,0.3) 5px, transparent 5px, transparent 10px)';
|
||||
bar.parentNode.appendChild(darkSection);
|
||||
|
||||
// Add a dashed "tear line" at the transition point
|
||||
const tearLine = document.createElement('div');
|
||||
tearLine.className = 'tear-line';
|
||||
tearLine.style.position = 'absolute';
|
||||
tearLine.style.left = '85%';
|
||||
// Center the tear line vertically and make it 1.5x as tall as the bar
|
||||
tearLine.style.top = '50%';
|
||||
tearLine.style.transform = 'translateY(-50%)';
|
||||
tearLine.style.height = '54px'; // 1.5x the bar height (36px)
|
||||
tearLine.style.width = '2px';
|
||||
tearLine.style.backgroundColor = 'white';
|
||||
tearLine.style.borderLeft = '2px dashed rgba(0, 0, 0, 0.3)';
|
||||
tearLine.style.zIndex = '2'; // Above the bar
|
||||
bar.parentNode.appendChild(tearLine);
|
||||
}
|
||||
} else {
|
||||
// Set width to 0 if cost is 0 or negative
|
||||
bar.style.width = '0%';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Call this initially to set up the bars
|
||||
updateCostBars();
|
||||
|
||||
// Update cost ticks dynamically based on current max display cost
|
||||
function updateCostTicks() {
|
||||
const costCells = document.querySelectorAll('.cost-bar-cell');
|
||||
if (costCells.length === 0) return;
|
||||
|
||||
const currentMaxDisplayCost = calculateDisplayMaxCost();
|
||||
|
||||
// Remove existing ticks first
|
||||
document.querySelectorAll('.cost-tick').forEach(tick => tick.remove());
|
||||
|
||||
// Generate appropriate tick values based on current max
|
||||
let tickValues = [];
|
||||
|
||||
// Always use $10 increments, regardless of the max
|
||||
const maxTickValue = Math.ceil(currentMaxDisplayCost / 10) * 10; // Round up to nearest $10
|
||||
|
||||
for (let i = 0; i <= maxTickValue; i += 10) {
|
||||
tickValues.push(i);
|
||||
}
|
||||
|
||||
// Calculate percentage positions for each tick
|
||||
const tickPercentages = tickValues.map(tickCost => {
|
||||
return (tickCost / currentMaxDisplayCost) * 100;
|
||||
});
|
||||
|
||||
// Add tick divs to each cost cell
|
||||
costCells.forEach(cell => {
|
||||
const costBar = cell.querySelector('.cost-bar');
|
||||
// Use optional chaining and provide '0' as fallback if costBar or dataset.cost is missing
|
||||
const cost = parseFloat(costBar?.dataset?.cost || '0');
|
||||
|
||||
// Only add ticks if the cost is actually greater than 0
|
||||
if (cost > 0) {
|
||||
tickPercentages.forEach((percent, index) => {
|
||||
// Ensure percentage is within valid range
|
||||
if (percent >= 0 && percent <= 100) {
|
||||
const tick = document.createElement('div');
|
||||
tick.className = 'cost-tick';
|
||||
tick.style.left = `${percent}%`;
|
||||
cell.appendChild(tick);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Call this initially to set up the ticks
|
||||
updateCostTicks();
|
||||
|
||||
|
||||
// --- New Event Listeners ---
|
||||
|
||||
// Listener for mode toggle buttons
|
||||
modeButtons.forEach(button => {
|
||||
button.addEventListener('click', function(event) {
|
||||
const newMode = this.dataset.mode;
|
||||
if (newMode !== currentMode) {
|
||||
// Update active button style
|
||||
modeButtons.forEach(btn => {
|
||||
btn.classList.remove('active');
|
||||
// Reset specific styles potentially added by .active
|
||||
btn.style.backgroundColor = '';
|
||||
btn.style.color = '';
|
||||
});
|
||||
this.classList.add('active');
|
||||
// Apply active styles directly as inline styles might interfere
|
||||
this.style.backgroundColor = '#e7f3ff'; // Use selected row highlight blue
|
||||
this.style.color = '#495057'; // Use dark text for contrast on light blue
|
||||
|
||||
// Update table view and apply filters
|
||||
updateTableView(newMode);
|
||||
applySearchFilter(); // Re-apply search filter when mode changes
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Listener for row selector checkboxes (using event delegation on table body)
|
||||
const tableBody = document.querySelector('table tbody');
|
||||
tableBody.addEventListener('change', function(event) {
|
||||
if (event.target.classList.contains('row-selector') && currentMode === 'select') {
|
||||
const checkbox = event.target;
|
||||
const rowIndex = checkbox.dataset.rowIndex;
|
||||
const mainRow = checkbox.closest('tr');
|
||||
|
||||
if (checkbox.checked) {
|
||||
selectedRows.add(rowIndex);
|
||||
mainRow.classList.add('row-selected');
|
||||
} else {
|
||||
selectedRows.delete(rowIndex);
|
||||
mainRow.classList.remove('row-selected');
|
||||
}
|
||||
// Update select-all checkbox state
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks if in view mode, as selection affects what's shown
|
||||
if (currentMode === 'view') {
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
}
|
||||
}); // End of tableBody listener
|
||||
|
||||
// Listener for Select All checkbox
|
||||
selectAllCheckbox.addEventListener('change', function() {
|
||||
if (currentMode !== 'select') return;
|
||||
|
||||
const isChecked = selectAllCheckbox.checked;
|
||||
// Select/deselect only the rows that are currently visible
|
||||
const visibleRows = getVisibleMainRows();
|
||||
|
||||
visibleRows.forEach(row => {
|
||||
const checkbox = row.querySelector('.row-selector');
|
||||
const rowIndex = checkbox?.dataset.rowIndex;
|
||||
if (!checkbox || !rowIndex) return; // Skip if no checkbox/index found
|
||||
|
||||
// Only change state if it differs from target state
|
||||
if (checkbox.checked !== isChecked) {
|
||||
checkbox.checked = isChecked;
|
||||
row.classList.toggle('row-selected', isChecked);
|
||||
if (isChecked) {
|
||||
selectedRows.add(rowIndex);
|
||||
} else {
|
||||
selectedRows.delete(rowIndex);
|
||||
}
|
||||
}
|
||||
});
|
||||
// After bulk change, ensure the selectAll checkbox state is correct (not indeterminate)
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks after selection changes
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
});
|
||||
|
||||
// Listener for search input
|
||||
searchInput.addEventListener('input', applySearchFilter);
|
||||
|
||||
// Add toggle functionality for details (Modified to respect modes)
|
||||
const toggleButtons = document.querySelectorAll('.toggle-details');
|
||||
toggleButtons.forEach(button => {
|
||||
button.addEventListener('click', function() {
|
||||
// Only allow toggling in 'detail' mode
|
||||
if (currentMode !== 'detail') return;
|
||||
|
||||
const targetId = this.getAttribute('data-target');
|
||||
const targetRow = document.getElementById(targetId);
|
||||
const mainRow = this.closest('tr'); // Get the main row associated with this button
|
||||
|
||||
if (targetRow && !mainRow.classList.contains('hidden-by-mode') && !mainRow.classList.contains('hidden-by-search')) {
|
||||
const isVisible = targetRow.style.display !== 'none';
|
||||
targetRow.style.display = isVisible ? 'none' : 'table-row';
|
||||
this.textContent = isVisible ? '▶' : '▼';
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Listener for clicking anywhere on a row
|
||||
tableBody.addEventListener('click', function(event) {
|
||||
const clickedRow = event.target.closest('tr');
|
||||
|
||||
// Ensure it's a main row and not a details row or header/footer
|
||||
if (!clickedRow || !clickedRow.id.startsWith('main-row-')) return;
|
||||
|
||||
// --- START conditional logic ---
|
||||
if (currentMode === 'select') {
|
||||
// --- SELECT MODE LOGIC (Existing) ---
|
||||
// Find the checkbox within this row
|
||||
const checkbox = clickedRow.querySelector('.row-selector');
|
||||
if (!checkbox) return; // No checkbox found in this row
|
||||
|
||||
// If the click was directly on the checkbox or its label (if any),
|
||||
// let the default behavior and the 'change' event listener handle it.
|
||||
// Otherwise, toggle the checkbox state programmatically.
|
||||
if (event.target !== checkbox && event.target.tagName !== 'LABEL' /* Add if you use labels */) {
|
||||
checkbox.checked = !checkbox.checked;
|
||||
// Manually trigger the change event to update state and UI
|
||||
checkbox.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
}
|
||||
// --- END SELECT MODE LOGIC ---
|
||||
|
||||
} else if (currentMode === 'view') {
|
||||
// --- VIEW MODE LOGIC (New) ---
|
||||
// Don't highlight if the click was on the details toggle button
|
||||
if (event.target.classList.contains('toggle-details')) {
|
||||
return;
|
||||
}
|
||||
// Toggle the highlight class on the clicked row
|
||||
clickedRow.classList.toggle('view-highlighted');
|
||||
// --- END VIEW MODE LOGIC ---
|
||||
}
|
||||
// --- END conditional logic ---
|
||||
});
|
||||
|
||||
|
||||
// --- Initial Setup ---
|
||||
updateTableView('view'); // Initialize view to 'view' mode
|
||||
applySearchFilter(); // Apply initial search filter (if any text is pre-filled or just to set initial state)
|
||||
|
||||
// Close button functionality
|
||||
const closeControlsBtn = document.getElementById('close-controls-btn');
|
||||
if (closeControlsBtn) {
|
||||
closeControlsBtn.addEventListener('click', function() {
|
||||
const controlsContainer = document.getElementById('controls-container');
|
||||
if (controlsContainer) {
|
||||
controlsContainer.style.display = 'none';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
});
|
||||
@@ -4,7 +4,7 @@ You can send long, multi-line messages in the chat in a few ways:
|
||||
- Or, start with `{tag` (where "tag" is any sequence of letters/numbers) and end with `tag}`. This is useful when you need to include closing braces `}` in your message.
|
||||
- Use Meta-ENTER to start a new line without sending the message (Esc+ENTER in some environments).
|
||||
- Use `/paste` to paste text from the clipboard into the chat.
|
||||
- Use the `/editor` command to open your editor to create the next chat message. See [editor configuration docs](/docs/config/editor.html) for more info.
|
||||
- Use the `/editor` command (or press `Ctrl-X Ctrl-E` if your terminal allows) to open your editor to create the next chat message. See [editor configuration docs](/docs/config/editor.html) for more info.
|
||||
- Use multiline-mode, which swaps the function of Meta-Enter and Enter, so that Enter inserts a newline, and Meta-Enter submits your command. To enable multiline mode:
|
||||
- Use the `/multiline-mode` command to toggle it during a session.
|
||||
- Use the `--multiline` switch.
|
||||
|
||||
@@ -3,5 +3,5 @@
|
||||
Aider is on
|
||||
<a href="https://github.com/Aider-AI/aider">GitHub</a>
|
||||
and
|
||||
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>.
|
||||
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>.
|
||||
</footer>
|
||||
|
||||
114
aider/website/_posts/2025-05-07-gemini-cost.md
Normal file
114
aider/website/_posts/2025-05-07-gemini-cost.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
title: Gemini 2.5 Pro Preview 03-25 benchmark cost
|
||||
excerpt: The $6.32 benchmark cost reported for Gemini 2.5 Pro Preview 03-25 was incorrect.
|
||||
draft: false
|
||||
nav_exclude: true
|
||||
---
|
||||
{% if page.date %}
|
||||
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
|
||||
{% endif %}
|
||||
|
||||
# Gemini 2.5 Pro Preview 03-25 benchmark cost
|
||||
|
||||
## Summary
|
||||
The $6.32 cost reported to run the aider polyglot benchmark on
|
||||
Gemini 2.5 Pro Preview 03-25 was incorrect.
|
||||
The true cost was higher, possibly significantly so.
|
||||
The incorrect cost has been removed from the leaderboard.
|
||||
|
||||
An investigation determined the primary cause was that the litellm
|
||||
package (used by aider for LLM API connections) was not properly including reasoning tokens in
|
||||
the token counts it reported.
|
||||
While an incorrect price-per-token entry for the model also existed in litellm's cost
|
||||
database at that time, this was found not to be a contributing factor.
|
||||
Aider's own internal, correct pricing data was utilized during the benchmark.
|
||||
|
||||
## Resolution
|
||||
|
||||
Litellm began correctly including reasoning tokens in the reported counts
|
||||
on April 21, 2025 in
|
||||
commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b).
|
||||
This change was released in litellm v1.67.1.
|
||||
Aider picked up this change April 28, 2025 when it upgraded its litellm dependency
|
||||
from v1.65.7 to v1.67.4.post1
|
||||
in commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37).
|
||||
That dependency change shipped on May 5, 2025 in aider v0.82.3.
|
||||
|
||||
Unfortunately the 03-25 version of Gemini 2.5 Pro Preview is no longer available,
|
||||
so it is not possible to re-run the benchmark to obtain an accurate cost.
|
||||
As a possibly relevant comparison, the newer 05-06 version of Gemini 2.5 Pro Preview
|
||||
completed the benchmark at a cost of about $37.
|
||||
|
||||
## Investigation detail
|
||||
|
||||
The version of litellm available at the time of the benchmark appears to have been
|
||||
excluding reasoning tokens from the token counts it reported.
|
||||
So even though aider had correct per-token pricing, it did not have the correct token counts
|
||||
used during the benchmark.
|
||||
This resulted in an underestimate of the benchmark costs.
|
||||
|
||||
The incorrect litellm database entry does not appear to have affected the aider benchmark costs.
|
||||
Aider maintains and uses its own database of costs for some models, and it contained
|
||||
the correct pricing at the time of the benchmark.
|
||||
Aider appears to have
|
||||
loaded the correct cost data from its database and made use of it during the benchmark.
|
||||
|
||||
Every aider benchmark report contains the git commit hash of the aider repository state used to
|
||||
run the benchmark.
|
||||
The
|
||||
[benchmark run in question](https://github.com/Aider-AI/aider/blob/edbfec0ce4e1fe86735c915cb425b0d8636edc32/aider/website/_data/polyglot_leaderboard.yml#L814)
|
||||
was built from
|
||||
commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
|
||||
|
||||
Additional runs of the benchmark from that build verified that the error in litellm's
|
||||
model cost database appears not to have been a factor:
|
||||
|
||||
- Aider's internal model database correctly overrides the litellm database, which contained an incorrect token cost at the time.
|
||||
- The correct pricing is loaded from aider's internal model database and produces similar (incorrect) costs as the original run.
|
||||
- Updating aider's internal model database with an absurdly high token cost resulted in an appropriately high benchmark cost report, demonstrating that the internal database costs were in effect.
|
||||
|
||||
This specific build of aider was then updated with various versions of litellm using `git bisect`
|
||||
to identify the first litellm commit where reasoning tokens counts were correctly reported.
|
||||
|
||||
|
||||
|
||||
## Timeline
|
||||
|
||||
Below is the full timeline of git commits related to this issue in the aider and litellm repositories.
|
||||
Each entry has a UTC timestamp, followed by the original literal timestamp obtained from the
|
||||
relevant source.
|
||||
|
||||
- 2025-04-04 19:54:45 UTC (Sat Apr 5 08:54:45 2025 +1300)
|
||||
- Correct value `"output_cost_per_token": 0.000010` for `gemini/gemini-2.5-pro-preview-03-25` added to `aider/resources/model-metadata.json`
|
||||
- Commit [eda796d](https://github.com/Aider-AI/aider/commit/eda796d) in aider.
|
||||
|
||||
- 2025-04-05 16:20:01 UTC (Sun Apr 6 00:20:01 2025 +0800)
|
||||
- First litellm commit of `gemini/gemini-2.5-pro-preview-03-25` metadata, with incorrect price `"output_cost_per_token": 0.0000010`
|
||||
- Commit [cd0a1e6](https://github.com/BerriAI/litellm/commit/cd0a1e6) in litellm.
|
||||
|
||||
- 2025-04-10 01:48:43 UTC (Wed Apr 9 18:48:43 2025 -0700)
|
||||
- litellm commit updates `gemini/gemini-2.5-pro-preview-03-25` metadata, but not price
|
||||
- Commit [ac4f32f](https://github.com/BerriAI/litellm/commit/ac4f32f) in litellm.
|
||||
|
||||
- 2025-04-12 04:55:50 UTC (2025-04-12-04-55-50 UTC)
|
||||
- Benchmark performed.
|
||||
- Aider repo hash [0282574 recorded in benchmark results](https://github.com/Aider-AI/aider/blob/7fbeafa1cfd4ad83f7499417837cdfa6b16fe7a1/aider/website/_data/polyglot_leaderboard.yml#L814), without a "dirty" annotation, indicating that the benchmark was run on a clean checkout of the aider repo at commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
|
||||
- Correct value `"output_cost_per_token": 0.000010` is in `aider/resources/model-metadata.json` at this commit [0282574](https://github.com/Aider-AI/aider/blob/0282574/aider/resources/model-metadata.json#L357).
|
||||
|
||||
- 2025-04-12 15:06:39 UTC (Apr 12 08:06:39 2025 -0700)
|
||||
- Benchmark results added to aider repo.
|
||||
- Commit [7fbeafa](https://github.com/Aider-AI/aider/commit/7fbeafa) in aider.
|
||||
|
||||
- 2025-04-12 15:20:04 UTC (Sat Apr 12 19:20:04 2025 +0400)
|
||||
- litellm commit fixes `gemini/gemini-2.5-pro-preview-03-25` price metadata to `"output_cost_per_token": 0.00001`
|
||||
- Commit [93037ea](https://github.com/BerriAI/litellm/commit/93037ea) in litellm.
|
||||
|
||||
- 2025-04-22 05:48:00 UTC (Mon Apr 21 22:48:00 2025 -0700)
|
||||
- Litellm started including reasoning tokens in token count reporting.
|
||||
- Commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b) in litellm.
|
||||
- This fix was released in litellm v1.67.1.
|
||||
|
||||
- 2025-04-28 14:53:20 UTC (Mon Apr 28 07:53:20 2025 -0700)
|
||||
- Aider upgraded its litellm dependency from v1.65.7 to v1.67.4.post1, which included the reasoning token count fix.
|
||||
- Commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37) in aider.
|
||||
- This dependency change shipped on May 5, 2025 in aider v0.82.3.
|
||||
340
aider/website/_posts/2025-05-08-qwen3.md
Normal file
340
aider/website/_posts/2025-05-08-qwen3.md
Normal file
@@ -0,0 +1,340 @@
|
||||
---
|
||||
layout: post
|
||||
title: Qwen3 benchmark results
|
||||
excerpt: "Benchmark results for Qwen3 models using the Aider polyglot coding benchmark."
|
||||
highlight_image: /assets/2025-05-08-qwen3.jpg
|
||||
date: 2025-05-08
|
||||
---
|
||||
|
||||
# Qwen3 results on the aider polyglot benchmark
|
||||
|
||||
As [previously discussed when Qwen2.5 was released](/2024/11/21/quantization.html),
|
||||
details matter when working with open source models for AI coding.
|
||||
Proprietary models are served by their creators or trusted providers with stable inference settings.
|
||||
Open source models are wonderful because anyone can serve them,
|
||||
but API providers can use very different inference settings, quantizations, etc.
|
||||
|
||||
Below is a collection of aider polyglot benchmark results for the new Qwen3 models.
|
||||
Results are presented using both "diff" and "whole"
|
||||
[edit formats](https://aider.chat/docs/more/edit-formats.html),
|
||||
with various model settings, against various API providers.
|
||||
|
||||
See details on the
|
||||
[model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
|
||||
used after the results table.
|
||||
|
||||
{: .note }
|
||||
This article is being updated as new results become available.
|
||||
Also, some results were submitted by aider users and have not been verified.
|
||||
|
||||
<h2 id="leaderboard-title">Qwen3 results on the aider polyglot benchmark</h2>
|
||||
|
||||
<div id="controls-container" style="display: flex; align-items: center; width: 100%; max-width: 800px; margin: 10px auto; gap: 10px; box-sizing: border-box; padding: 0 5px; position: relative;">
|
||||
<input type="text" id="editSearchInput" placeholder="Search..." style="flex-grow: 1; padding: 8px; border: 1px solid #ddd; border-radius: 4px;">
|
||||
<div id="view-mode-toggle" style="display: inline-flex; border: 1px solid #ccc; border-radius: 4px;">
|
||||
<button id="mode-view-btn" class="mode-button active" data-mode="view" style="padding: 8px 8px; border: none; border-radius: 3px 0 0 3px; cursor: pointer; font-size: 14px; line-height: 1.5; min-width: 50px;">View</button>
|
||||
<button id="mode-select-btn" class="mode-button" data-mode="select" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Select</button>
|
||||
<button id="mode-detail-btn" class="mode-button" data-mode="detail" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0 3px 3px 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Detail</button>
|
||||
</div>
|
||||
<button id="close-controls-btn" style="width: 18px; height: 18px; padding: 0; border: 1px solid #ddd; border-radius: 50%; background-color: transparent; cursor: pointer; display: flex; align-items: center; justify-content: center; font-size: 12px; margin-left: 4px; color: #999;">×</button>
|
||||
</div>
|
||||
|
||||
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
|
||||
<thead style="background-color: #f2f2f2;">
|
||||
<tr>
|
||||
<th style="padding: 8px; width: 40px; text-align: center; vertical-align: middle;">
|
||||
<input type="checkbox" id="select-all-checkbox" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</th> <!-- Header checkbox added here -->
|
||||
<th style="padding: 8px; text-align: left;">Model</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Percent correct</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Cost</th>
|
||||
<th style="padding: 8px; text-align: left;" class="col-command">Command</th>
|
||||
<th style="padding: 8px; text-align: center; width: 10%" class="col-conform">Correct edit format</th>
|
||||
<th style="padding: 8px; text-align: left; width: 10%" class="col-edit-format">Edit Format</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% assign max_cost = 0 %}
|
||||
{% for row in site.data.qwen3_leaderboard %}
|
||||
{% if row.total_cost > max_cost %}
|
||||
{% assign max_cost = row.total_cost %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if max_cost == 0 %}{% assign max_cost = 1 %}{% endif %}
|
||||
{% assign edit_sorted = site.data.qwen3_leaderboard | sort: 'pass_rate_2' | reverse %}
|
||||
{% for row in edit_sorted %} {% comment %} Add loop index for unique IDs {% endcomment %}
|
||||
{% assign row_index = forloop.index0 %}
|
||||
<tr id="main-row-{{ row_index }}">
|
||||
<td style="padding: 8px; text-align: center; vertical-align: middle;">
|
||||
<button class="toggle-details" data-target="details-{{ row_index }}" style="background: none; border: none; cursor: pointer; font-size: 16px; padding: 0; vertical-align: middle;">▶</button>
|
||||
<input type="checkbox" class="row-selector" data-row-index="{{ row_index }}" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</td>
|
||||
<td style="padding: 8px;"><span>{{ row.model }}</span></td>
|
||||
<td class="bar-cell">
|
||||
<div class="bar-viz" style="width: {{ row.pass_rate_2 }}%; background-color: rgba(40, 167, 69, 0.3); border-right: 1px solid rgba(40, 167, 69, 0.5);"></div>
|
||||
<span>{{ row.pass_rate_2 }}%</span>
|
||||
</td>
|
||||
<td class="bar-cell cost-bar-cell">
|
||||
{% if row.total_cost > 0 %}
|
||||
<div class="bar-viz cost-bar" data-cost="{{ row.total_cost }}" data-max-cost="{{ max_cost }}" style="width: 0%; background-color: rgba(13, 110, 253, 0.3); border-right: 1px solid rgba(13, 110, 253, 0.5);"></div>
|
||||
{% endif %}
|
||||
{% assign rounded_cost = row.total_cost | times: 1.0 | round: 2 %}
|
||||
<span>{% if row.total_cost == 0 or rounded_cost == 0.00 %}{% else %}${{ rounded_cost }}{% endif %}</span>
|
||||
</td>
|
||||
<td style="padding: 8px;" class="col-command"><span><code>{{ row.command }}</code></span></td>
|
||||
<td style="padding: 8px; text-align: center;" class="col-conform"><span>{{ row.percent_cases_well_formed }}%</span></td>
|
||||
<td style="padding: 8px;" class="col-edit-format"><span>{{ row.edit_format }}</span></td>
|
||||
</tr>
|
||||
<tr class="details-row" id="details-{{ row_index }}" style="display: none; background-color: #f9f9f9;">
|
||||
<td colspan="7" style="padding: 15px; border-bottom: 1px solid #ddd;">
|
||||
<ul style="margin: 0; padding-left: 20px; list-style: none; border-bottom: 1px solid #ddd;">
|
||||
{% for pair in row %}
|
||||
{% if pair[1] != "" and pair[1] != nil %}
|
||||
<li><strong>
|
||||
{% if pair[0] == 'percent_cases_well_formed' %}
|
||||
Percent cases well formed
|
||||
{% else %}
|
||||
{{ pair[0] | replace: '_', ' ' | capitalize }}
|
||||
{% endif %}
|
||||
:</strong>
|
||||
{% if pair[0] == 'command' %}<code>{{ pair[1] }}</code>{% else %}{{ pair[1] }}{% endif %}
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<style>
|
||||
#leaderboard-title {
|
||||
margin-bottom: 20px; /* Add space below the title */
|
||||
}
|
||||
tr.selected {
|
||||
color: #0056b3;
|
||||
}
|
||||
table {
|
||||
table-layout: fixed;
|
||||
}
|
||||
thead {
|
||||
border-top: 1px solid #ddd; /* Add top border to header */
|
||||
}
|
||||
td, th {
|
||||
border: none; /* Remove internal cell borders */
|
||||
word-wrap: break-word;
|
||||
overflow-wrap: break-word;
|
||||
vertical-align: middle; /* Ensure consistent vertical alignment */
|
||||
}
|
||||
tbody tr {
|
||||
height: 50px; /* Set a minimum height for all data rows */
|
||||
}
|
||||
td.col-command { /* Command column */
|
||||
font-size: 12px; /* Keep font size adjustment for command column if desired, or remove */
|
||||
}
|
||||
|
||||
/* Hide new columns first on smaller screens */
|
||||
@media screen and (max-width: 991px) {
|
||||
th.col-conform, td.col-conform,
|
||||
th.col-edit-format, td.col-edit-format {
|
||||
display: none;
|
||||
}
|
||||
/* Increase width of Percent correct and Cost columns when others are hidden */
|
||||
th:nth-child(3), td:nth-child(3), /* Percent correct */
|
||||
th:nth-child(4), td:nth-child(4) { /* Cost */
|
||||
width: 33% !important; /* Override inline style */
|
||||
}
|
||||
}
|
||||
|
||||
/* Hide command column on even smaller screens */
|
||||
@media screen and (max-width: 767px) {
|
||||
th.col-command, td.col-command { /* Command column */
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Control Styles --- */
|
||||
#controls-container {
|
||||
margin-bottom: 20px; /* Add some space below controls */
|
||||
}
|
||||
|
||||
#editSearchInput, #view-mode-select {
|
||||
padding: 8px 12px; /* Consistent padding */
|
||||
border: 1px solid #ccc; /* Slightly softer border */
|
||||
border-radius: 4px;
|
||||
font-size: 14px; /* Match table font size */
|
||||
height: 38px; /* Match height */
|
||||
box-sizing: border-box; /* Include padding/border in height */
|
||||
}
|
||||
|
||||
|
||||
.bar-cell {
|
||||
position: relative; /* Positioning context for the bar */
|
||||
padding: 8px;
|
||||
/* text-align: center; Removed */
|
||||
overflow: hidden; /* Prevent bar from overflowing cell boundaries if needed */
|
||||
}
|
||||
.cost-bar-cell {
|
||||
background-image: none; /* Remove default gradient for cost cells */
|
||||
}
|
||||
.percent-tick, .cost-tick {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
transform: translateY(10px);
|
||||
height: 8px; /* Short tick */
|
||||
width: 1px;
|
||||
background-color: rgba(170, 170, 170, 0.5);
|
||||
z-index: 2; /* Above the bar but below the text */
|
||||
}
|
||||
.bar-viz {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 50%; /* Position at the middle of the cell */
|
||||
transform: translateY(-50%); /* Center the bar vertically */
|
||||
z-index: 1; /* Above background, below ticks and text */
|
||||
height: 36px;
|
||||
border-radius: 0 2px 2px 0; /* Slightly rounded end corners */
|
||||
/* Width and colors are set inline via style attribute */
|
||||
}
|
||||
/* Add a tooltip class for showing cost information on hover */
|
||||
.cost-bar-cell:hover .bar-viz[style*="background-image"] {
|
||||
animation: stripe-animation 2s linear infinite;
|
||||
}
|
||||
@keyframes stripe-animation {
|
||||
0% { background-position: 0 0; }
|
||||
100% { background-position: 20px 0; }
|
||||
}
|
||||
.bar-cell span {
|
||||
position: absolute; /* Position relative to the cell */
|
||||
left: 5px; /* Position slightly inside the left edge */
|
||||
top: 50%; /* Center vertically */
|
||||
transform: translateY(-50%); /* Adjust vertical centering */
|
||||
z-index: 3; /* Ensure text is above everything else */
|
||||
background-color: rgba(255, 255, 255, 0.7); /* Semi-transparent white background */
|
||||
padding: 0 4px; /* Add padding around the text */
|
||||
border-radius: 3px; /* Rounded corners for the text background */
|
||||
font-size: 14px; /* Adjust font size for the numbers */
|
||||
}
|
||||
.toggle-details {
|
||||
color: #888; /* Make toggle symbol more subtle */
|
||||
transition: color 0.2s; /* Smooth transition on hover */
|
||||
}
|
||||
|
||||
|
||||
/* Style for selected rows */
|
||||
tr.row-selected > td {
|
||||
background-color: #e7f3ff; /* Example light blue highlight */
|
||||
}
|
||||
|
||||
/* Ensure checkbox is vertically aligned if needed */
|
||||
.row-selector {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
/* Hide rows not matching the filter */
|
||||
tr.hidden-by-mode {
|
||||
display: none !important; /* Use important to override other display styles if necessary */
|
||||
}
|
||||
tr.hidden-by-search {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* --- Mode Toggle Button Styles --- */
|
||||
#view-mode-toggle {
|
||||
height: 38px; /* Match input height */
|
||||
box-sizing: border-box;
|
||||
flex-shrink: 0; /* Prevent toggle from shrinking on small screens */
|
||||
}
|
||||
.mode-button {
|
||||
transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
|
||||
white-space: nowrap; /* Prevent text wrapping */
|
||||
}
|
||||
.mode-button:not(.active) {
|
||||
background-color: #f8f9fa; /* Light grey background */
|
||||
color: #495057; /* Dark grey text */
|
||||
}
|
||||
.mode-button:not(.active):hover {
|
||||
background-color: #e2e6ea; /* Slightly darker grey on hover */
|
||||
}
|
||||
|
||||
/* Style for highlighted rows in view mode */
|
||||
tr.view-highlighted > td {
|
||||
background-color: #fffef5; /* Very light yellow/cream */
|
||||
/* Border moved to specific cell below */
|
||||
}
|
||||
/* Apply border and adjust padding ONLY for the first *visible* cell (Model name) in view mode */
|
||||
tr.view-highlighted > td:nth-child(2) {
|
||||
border-left: 4px solid #ffc107; /* Warning yellow border */
|
||||
/* Original padding is 8px. Subtract border width. */
|
||||
padding-left: 4px;
|
||||
}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
const LEADERBOARD_CUSTOM_TITLE = "Qwen3 results on the aider polyglot benchmark";
|
||||
{% include leaderboard_table.js %}
|
||||
</script>
|
||||
|
||||
|
||||
## OpenRouter, TogetherAI provider only, recommended /no_think settings
|
||||
|
||||
These results were obtained with the
|
||||
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
|
||||
non-thinking model settings in `.aider.model.settings.yml`:
|
||||
|
||||
```yaml
|
||||
- name: openrouter/qwen/qwen3-235b-a22b
|
||||
system_prompt_prefix: "/no_think"
|
||||
use_temperature: 0.7
|
||||
extra_params:
|
||||
max_tokens: 24000
|
||||
top_p: 0.8
|
||||
top_k: 20
|
||||
min_p: 0.0
|
||||
temperature: 0.7
|
||||
extra_body:
|
||||
provider:
|
||||
order: ["Together"]
|
||||
```
|
||||
|
||||
And then running aider:
|
||||
|
||||
```bash
|
||||
aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
```
|
||||
|
||||
|
||||
## OpenRouter, all providers, default settings (thinking)
|
||||
|
||||
These results were obtained by simply running aider as shown below, without any model specific settings.
|
||||
This should have enabled thinking, assuming upstream API providers honor that convention for Qwen3.
|
||||
|
||||
```bash
|
||||
aider --model openrouter/qwen/qwen3-xxx
|
||||
```
|
||||
|
||||
## VLLM, bfloat16, recommended /no_think
|
||||
|
||||
These [benchmarks results were obtained by GitHub user AlongWY](https://github.com/Aider-AI/aider/pull/3908)
|
||||
with the
|
||||
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
|
||||
non-thinking model settings in `.aider.model.settings.yml`:
|
||||
|
||||
```yaml
|
||||
- name: openai/<model-name>
|
||||
system_prompt_prefix: "/no_think"
|
||||
use_temperature: 0.7
|
||||
extra_params:
|
||||
max_tokens: 24000
|
||||
top_p: 0.8
|
||||
top_k: 20
|
||||
min_p: 0.0
|
||||
temperature: 0.7
|
||||
```
|
||||
|
||||
And then running aider:
|
||||
|
||||
```bash
|
||||
aider --model openai/<model-name> --openai-api-base <url>
|
||||
```
|
||||
BIN
aider/website/assets/2025-05-08-qwen3.jpg
Normal file
BIN
aider/website/assets/2025-05-08-qwen3.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 221 KiB |
@@ -446,7 +446,7 @@ code, pre, .code-block {
|
||||
}
|
||||
|
||||
.testimonial-text::before {
|
||||
content: "\201C"; /* Opening fancy quote */
|
||||
content: "\201C\00A0"; /* Opening fancy quote */
|
||||
color: var(--primary);
|
||||
margin-right: 4px;
|
||||
vertical-align: -0.3em;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -224,11 +224,11 @@
|
||||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#dirty-commits: true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#attribute-author: true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#attribute-author: xxx
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#attribute-committer: true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#attribute-committer: xxx
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#attribute-commit-message-author: false
|
||||
@@ -236,6 +236,9 @@
|
||||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#attribute-commit-message-committer: false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#attribute-co-authored-by: false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#git-commit-verify: false
|
||||
|
||||
@@ -358,6 +361,9 @@
|
||||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#disable-playwright: false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#file: xxx
|
||||
## Specify multiple values like this:
|
||||
@@ -422,6 +428,9 @@
|
||||
## Specify which editor to use for the /editor command
|
||||
#editor: xxx
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#shell-completions: xxx
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
||||
@@ -213,11 +213,11 @@
|
||||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#AIDER_DIRTY_COMMITS=true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#AIDER_ATTRIBUTE_AUTHOR=true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#AIDER_ATTRIBUTE_AUTHOR=
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#AIDER_ATTRIBUTE_COMMITTER=true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#AIDER_ATTRIBUTE_COMMITTER=
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_AUTHOR=false
|
||||
@@ -225,6 +225,9 @@
|
||||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_COMMITTER=false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#AIDER_ATTRIBUTE_CO_AUTHORED_BY=false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#AIDER_GIT_COMMIT_VERIFY=false
|
||||
|
||||
@@ -339,6 +342,9 @@
|
||||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#AIDER_DISABLE_PLAYWRIGHT=false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#AIDER_FILE=
|
||||
|
||||
@@ -390,6 +396,9 @@
|
||||
## Specify which editor to use for the /editor command
|
||||
#AIDER_EDITOR=
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#AIDER_SHELL_COMPLETIONS=
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
||||
@@ -117,40 +117,6 @@ For example:
|
||||
These settings will be merged with any model-specific settings, with the
|
||||
`aider/extra_params` settings taking precedence for any direct conflicts.
|
||||
|
||||
### Controlling o1 reasoning effort
|
||||
|
||||
You need this chunk of yaml:
|
||||
|
||||
```
|
||||
extra_params:
|
||||
extra_body:
|
||||
reasoning_effort: high
|
||||
```
|
||||
|
||||
This is a full entry for o1 with that setting, obtained by finding the default
|
||||
entry in the list below and adding the above `extra_params` entry:
|
||||
|
||||
```
|
||||
- name: o1
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
use_repo_map: true
|
||||
send_undo_reply: false
|
||||
lazy: false
|
||||
reminder: user
|
||||
examples_as_sys_msg: false
|
||||
cache_control: false
|
||||
caches_by_default: false
|
||||
use_system_prompt: true
|
||||
use_temperature: false
|
||||
streaming: false
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
extra_params:
|
||||
extra_body:
|
||||
reasoning_effort: high
|
||||
```
|
||||
|
||||
### Default model settings
|
||||
|
||||
Below are all the pre-configured model settings to give a sense for the settings which are supported.
|
||||
@@ -280,6 +246,18 @@ cog.out("```\n")
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
|
||||
cache_control: true
|
||||
|
||||
- name: azure/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
editor_model_name: azure/gpt-4.1-mini
|
||||
|
||||
- name: azure/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: azure/o1
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4o-mini
|
||||
@@ -308,6 +286,18 @@ cog.out("```\n")
|
||||
editor_model_name: azure/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: azure/o3
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
streaming: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4o-mini
|
||||
@@ -319,6 +309,66 @@ cog.out("```\n")
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
@@ -590,6 +640,13 @@ cog.out("```\n")
|
||||
editor_edit_format: editor-diff
|
||||
reasoning_tag: think
|
||||
|
||||
- name: gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: gemini/gemini-1.5-flash-002
|
||||
|
||||
- name: gemini/gemini-1.5-flash-exp-0827
|
||||
@@ -618,15 +675,30 @@ cog.out("```\n")
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
||||
- name: gemini/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: gemini/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: gemini/gemini-2.0-flash
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: gemini/gemini-2.0-flash
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: gemini/gemini-exp-1114
|
||||
edit_format: diff
|
||||
@@ -717,6 +789,18 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
editor_model_name: gpt-4.1-mini
|
||||
|
||||
- name: gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -803,6 +887,18 @@ cog.out("```\n")
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: o3
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
streaming: false
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -814,6 +910,30 @@ cog.out("```\n")
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
editor_model_name: openai/gpt-4.1-mini
|
||||
|
||||
- name: openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: openai/gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -883,6 +1003,18 @@ cog.out("```\n")
|
||||
editor_model_name: openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openai/o3
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
streaming: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -894,6 +1026,66 @@ cog.out("```\n")
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/anthropic/claude-3-opus
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
@@ -1032,10 +1224,23 @@ cog.out("```\n")
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemma-3-27b-it
|
||||
use_system_prompt: false
|
||||
@@ -1048,6 +1253,18 @@ cog.out("```\n")
|
||||
weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
editor_model_name: openrouter/openai/gpt-4.1-mini
|
||||
|
||||
- name: openrouter/openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: openrouter/openai/gpt-4o
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
@@ -1088,6 +1305,18 @@ cog.out("```\n")
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/openai/o3
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
streaming: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
@@ -1110,6 +1339,66 @@ cog.out("```\n")
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openrouter/optimus-alpha
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
@@ -1131,11 +1420,20 @@ cog.out("```\n")
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
||||
- name: openrouter/x-ai/grok-3-fast-beta
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
||||
- name: openrouter/x-ai/grok-3-mini-beta
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/x-ai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
@@ -1149,6 +1447,13 @@ cog.out("```\n")
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai/claude-3-5-haiku@20241022
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
@@ -1199,11 +1504,24 @@ cog.out("```\n")
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-pro-experimental
|
||||
edit_format: diff-fenced
|
||||
@@ -1213,10 +1531,19 @@ cog.out("```\n")
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
||||
- name: xai/grok-3-fast-beta
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
||||
- name: xai/grok-3-mini-beta
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: xai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
```
|
||||
<!--[[[end]]]-->
|
||||
|
||||
|
||||
@@ -278,11 +278,11 @@ cog.outl("```")
|
||||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#dirty-commits: true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#attribute-author: true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#attribute-author: xxx
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#attribute-committer: true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#attribute-committer: xxx
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#attribute-commit-message-author: false
|
||||
@@ -290,6 +290,9 @@ cog.outl("```")
|
||||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#attribute-commit-message-committer: false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#attribute-co-authored-by: false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#git-commit-verify: false
|
||||
|
||||
@@ -412,6 +415,9 @@ cog.outl("```")
|
||||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#disable-playwright: false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#file: xxx
|
||||
## Specify multiple values like this:
|
||||
@@ -476,6 +482,9 @@ cog.outl("```")
|
||||
## Specify which editor to use for the /editor command
|
||||
#editor: xxx
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#shell-completions: xxx
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
||||
@@ -253,11 +253,11 @@ cog.outl("```")
|
||||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#AIDER_DIRTY_COMMITS=true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#AIDER_ATTRIBUTE_AUTHOR=true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#AIDER_ATTRIBUTE_AUTHOR=
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#AIDER_ATTRIBUTE_COMMITTER=true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#AIDER_ATTRIBUTE_COMMITTER=
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_AUTHOR=false
|
||||
@@ -265,6 +265,9 @@ cog.outl("```")
|
||||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_COMMITTER=false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#AIDER_ATTRIBUTE_CO_AUTHORED_BY=false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#AIDER_GIT_COMMIT_VERIFY=false
|
||||
|
||||
@@ -379,6 +382,9 @@ cog.outl("```")
|
||||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#AIDER_DISABLE_PLAYWRIGHT=false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#AIDER_FILE=
|
||||
|
||||
@@ -430,6 +436,9 @@ cog.outl("```")
|
||||
## Specify which editor to use for the /editor command
|
||||
#AIDER_EDITOR=
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#AIDER_SHELL_COMPLETIONS=
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
||||
@@ -79,9 +79,9 @@ for alias, model in sorted(MODEL_ALIASES.items()):
|
||||
- `4-turbo`: gpt-4-1106-preview
|
||||
- `4o`: gpt-4o
|
||||
- `deepseek`: deepseek/deepseek-chat
|
||||
- `flash`: gemini/gemini-2.0-flash-exp
|
||||
- `gemini`: gemini/gemini-2.5-pro-preview-03-25
|
||||
- `gemini-2.5-pro`: gemini/gemini-2.5-pro-exp-03-25
|
||||
- `flash`: gemini/gemini-2.5-flash-preview-04-17
|
||||
- `gemini`: gemini/gemini-2.5-pro-preview-05-06
|
||||
- `gemini-2.5-pro`: gemini/gemini-2.5-pro-preview-05-06
|
||||
- `gemini-exp`: gemini/gemini-2.5-pro-exp-03-25
|
||||
- `grok3`: xai/grok-3-beta
|
||||
- `haiku`: claude-3-5-haiku-20241022
|
||||
|
||||
@@ -56,6 +56,7 @@ usage: aider [-h] [--model] [--openai-api-key] [--anthropic-api-key]
|
||||
[--attribute-committer | --no-attribute-committer]
|
||||
[--attribute-commit-message-author | --no-attribute-commit-message-author]
|
||||
[--attribute-commit-message-committer | --no-attribute-commit-message-committer]
|
||||
[--attribute-co-authored-by | --no-attribute-co-authored-by]
|
||||
[--git-commit-verify | --no-git-commit-verify]
|
||||
[--commit] [--commit-prompt] [--dry-run | --no-dry-run]
|
||||
[--skip-sanity-check-repo]
|
||||
@@ -72,17 +73,19 @@ usage: aider [-h] [--model] [--openai-api-key] [--anthropic-api-key]
|
||||
[--copy-paste | --no-copy-paste] [--apply]
|
||||
[--apply-clipboard-edits] [--exit] [--show-repo-map]
|
||||
[--show-prompts] [--voice-format] [--voice-language]
|
||||
[--voice-input-device] [--file] [--read] [--vim]
|
||||
[--chat-language] [--yes-always] [-v] [--load]
|
||||
[--encoding] [--line-endings] [-c] [--env-file]
|
||||
[--voice-input-device] [--disable-playwright] [--file]
|
||||
[--read] [--vim] [--chat-language] [--yes-always] [-v]
|
||||
[--load] [--encoding] [--line-endings] [-c]
|
||||
[--env-file]
|
||||
[--suggest-shell-commands | --no-suggest-shell-commands]
|
||||
[--fancy-input | --no-fancy-input]
|
||||
[--multiline | --no-multiline]
|
||||
[--notifications | --no-notifications]
|
||||
[--notifications-command]
|
||||
[--detect-urls | --no-detect-urls] [--editor] [--opus]
|
||||
[--sonnet] [--haiku] [--4] [--4o] [--mini] [--4-turbo]
|
||||
[--35turbo] [--deepseek] [--o1-mini] [--o1-preview]
|
||||
[--detect-urls | --no-detect-urls] [--editor]
|
||||
[--shell-completions] [--opus] [--sonnet] [--haiku]
|
||||
[--4] [--4o] [--mini] [--4-turbo] [--35turbo]
|
||||
[--deepseek] [--o1-mini] [--o1-preview]
|
||||
|
||||
```
|
||||
|
||||
@@ -412,16 +415,14 @@ Aliases:
|
||||
- `--no-dirty-commits`
|
||||
|
||||
### `--attribute-author`
|
||||
Attribute aider code changes in the git author name (default: True)
|
||||
Default: True
|
||||
Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
Environment variable: `AIDER_ATTRIBUTE_AUTHOR`
|
||||
Aliases:
|
||||
- `--attribute-author`
|
||||
- `--no-attribute-author`
|
||||
|
||||
### `--attribute-committer`
|
||||
Attribute aider commits in the git committer name (default: True)
|
||||
Default: True
|
||||
Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
Environment variable: `AIDER_ATTRIBUTE_COMMITTER`
|
||||
Aliases:
|
||||
- `--attribute-committer`
|
||||
@@ -443,6 +444,14 @@ Aliases:
|
||||
- `--attribute-commit-message-committer`
|
||||
- `--no-attribute-commit-message-committer`
|
||||
|
||||
### `--attribute-co-authored-by`
|
||||
Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
Default: False
|
||||
Environment variable: `AIDER_ATTRIBUTE_CO_AUTHORED_BY`
|
||||
Aliases:
|
||||
- `--attribute-co-authored-by`
|
||||
- `--no-attribute-co-authored-by`
|
||||
|
||||
### `--git-commit-verify`
|
||||
Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
Default: False
|
||||
@@ -652,6 +661,11 @@ Environment variable: `AIDER_VOICE_INPUT_DEVICE`
|
||||
|
||||
## Other settings:
|
||||
|
||||
### `--disable-playwright`
|
||||
Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
Default: False
|
||||
Environment variable: `AIDER_DISABLE_PLAYWRIGHT`
|
||||
|
||||
### `--file FILE`
|
||||
specify a file to edit (can be used multiple times)
|
||||
Environment variable: `AIDER_FILE`
|
||||
@@ -754,6 +768,10 @@ Aliases:
|
||||
Specify which editor to use for the /editor command
|
||||
Environment variable: `AIDER_EDITOR`
|
||||
|
||||
### `--shell-completions SHELL`
|
||||
Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
Environment variable: `AIDER_SHELL_COMPLETIONS`
|
||||
|
||||
## Deprecated model settings:
|
||||
|
||||
### `--opus`
|
||||
|
||||
@@ -264,13 +264,9 @@ tr:hover { background-color: #f5f5f5; }
|
||||
</style>
|
||||
<table>
|
||||
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
|
||||
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,119,621</td><td class='right'>77.4%</td></tr>
|
||||
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>269,898</td><td class='right'>18.6%</td></tr>
|
||||
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>18,140</td><td class='right'>1.3%</td></tr>
|
||||
<tr><td>o3-mini</td><td class='right'>17,296</td><td class='right'>1.2%</td></tr>
|
||||
<tr><td>openrouter/x-ai/grok-3-mini-beta</td><td class='right'>16,987</td><td class='right'>1.2%</td></tr>
|
||||
<tr><td>openrouter/REDACTED</td><td class='right'>4,099</td><td class='right'>0.3%</td></tr>
|
||||
<tr><td>xai/grok-3-mini-beta</td><td class='right'>1,224</td><td class='right'>0.1%</td></tr>
|
||||
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>890,057</td><td class='right'>69.9%</td></tr>
|
||||
<tr><td>o3</td><td class='right'>373,753</td><td class='right'>29.4%</td></tr>
|
||||
<tr><td>openrouter/REDACTED</td><td class='right'>8,745</td><td class='right'>0.7%</td></tr>
|
||||
</table>
|
||||
|
||||
{: .note :}
|
||||
@@ -288,6 +284,16 @@ by doing something like `git blame` on the repo,
|
||||
and counting up who wrote all the new lines of code in each release.
|
||||
Only lines in source code files are counted, not documentation or prompt files.
|
||||
|
||||
## Why did aider ignore/discard its proposed edits after it asked to add a new file to the chat?
|
||||
|
||||
If aider prompts you to add a new file to the chat and you say yes,
|
||||
it will re-submit the original request.
|
||||
The fact that the LLM's reply indicated that it needed to see another file (and you said yes)
|
||||
is often a sign that the LLM should have been able to see/edit that file in the first place.
|
||||
Without access to it, there is increased chance that it's done a bad implementation of the requested change.
|
||||
Often LLMs will hallucinate content for the files they needed but didn't have.
|
||||
So aider re-submits the original request in this situation.
|
||||
|
||||
## Why does aider sometimes stop highlighting code in its replies?
|
||||
|
||||
Aider displays the markdown responses that are coming back from the LLM.
|
||||
|
||||
@@ -71,4 +71,6 @@ Additionally, you can use the following options to prefix commit messages:
|
||||
- `--attribute-commit-message-author`: Prefix commit messages with 'aider: ' if aider authored the changes.
|
||||
- `--attribute-commit-message-committer`: Prefix all commit messages with 'aider: ', regardless of whether aider authored the changes or not.
|
||||
|
||||
Both of these options are disabled by default, but can be useful for easily identifying changes made by aider.
|
||||
Finally, you can use `--attribute-co-authored-by` to have aider append a Co-authored-by trailer to the end of the commit string.
|
||||
This will disable appending `(aider)` to the git author and git committer unless you have explicitly enabled those settings.
|
||||
|
||||
|
||||
@@ -180,6 +180,8 @@ cog.out(get_supported_languages_md())
|
||||
| nix | .nix | | ✓ |
|
||||
| nqc | .nqc | | ✓ |
|
||||
| objc | .mm | | ✓ |
|
||||
| ocaml | .ml | ✓ | ✓ |
|
||||
| ocaml_interface | .mli | ✓ | ✓ |
|
||||
| odin | .odin | | ✓ |
|
||||
| org | .org | | ✓ |
|
||||
| pascal | .pas | | ✓ |
|
||||
|
||||
@@ -8,100 +8,261 @@ has_children: true
|
||||
|
||||
# Aider LLM Leaderboards
|
||||
|
||||
Aider works best with LLMs which are good at *editing* code, not just good at writing
|
||||
code.
|
||||
To evaluate an LLM's editing skill, aider uses benchmarks that
|
||||
assess a model's ability to consistently follow the system prompt
|
||||
to successfully edit code.
|
||||
Aider excels with LLMs skilled at writing and *editing* code,
|
||||
and uses benchmarks to
|
||||
evaluate an LLM's ability to follow instructions and edit code successfully without
|
||||
human intervention.
|
||||
[Aider's polyglot benchmark](https://aider.chat/2024/12/21/polyglot.html#the-polyglot-benchmark) tests LLMs on 225 challenging Exercism coding exercises across C++, Go, Java, JavaScript, Python, and Rust.
|
||||
|
||||
The leaderboards report the results from a number of popular LLMs.
|
||||
While [aider can connect to almost any LLM](/docs/llms.html),
|
||||
it works best with models that score well on the benchmarks.
|
||||
<h2 id="leaderboard-title">Aider polyglot coding leaderboard</h2>
|
||||
|
||||
|
||||
## Polyglot leaderboard
|
||||
|
||||
[Aider's polyglot benchmark](https://aider.chat/2024/12/21/polyglot.html#the-polyglot-benchmark)
|
||||
asks the LLM to edit source files to complete 225 coding exercises
|
||||
from Exercism.
|
||||
It contains exercises in many popular programming languages:
|
||||
C++, Go, Java, JavaScript, Python and Rust.
|
||||
The 225 exercises were purposely selected to be the *hardest*
|
||||
that Exercism offered in those languages, to provide
|
||||
a strong coding challenge to LLMs.
|
||||
|
||||
This benchmark measures the LLM's coding ability in popular languages,
|
||||
and whether it can
|
||||
write new code that integrates into existing code.
|
||||
The model also has to successfully apply all its changes to the source file without human intervention.
|
||||
|
||||
<input type="text" id="editSearchInput" placeholder="Search..." style="width: 100%; max-width: 800px; margin: 10px auto; padding: 8px; display: block; border: 1px solid #ddd; border-radius: 4px;">
|
||||
<div id="controls-container" style="display: flex; align-items: center; width: 100%; max-width: 800px; margin: 10px auto; gap: 10px; box-sizing: border-box; padding: 0 5px; position: relative;">
|
||||
<input type="text" id="editSearchInput" placeholder="Search..." style="flex-grow: 1; padding: 8px; border: 1px solid #ddd; border-radius: 4px;">
|
||||
<div id="view-mode-toggle" style="display: inline-flex; border: 1px solid #ccc; border-radius: 4px;">
|
||||
<button id="mode-view-btn" class="mode-button active" data-mode="view" style="padding: 8px 8px; border: none; border-radius: 3px 0 0 3px; cursor: pointer; font-size: 14px; line-height: 1.5; min-width: 50px;">View</button>
|
||||
<button id="mode-select-btn" class="mode-button" data-mode="select" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Select</button>
|
||||
<button id="mode-detail-btn" class="mode-button" data-mode="detail" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0 3px 3px 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Detail</button>
|
||||
</div>
|
||||
<button id="close-controls-btn" style="width: 18px; height: 18px; padding: 0; border: 1px solid #ddd; border-radius: 50%; background-color: transparent; cursor: pointer; display: flex; align-items: center; justify-content: center; font-size: 12px; margin-left: 4px; color: #999;">×</button>
|
||||
</div>
|
||||
|
||||
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
|
||||
<thead style="background-color: #f2f2f2;">
|
||||
<tr>
|
||||
<th style="padding: 8px; width: 40px; text-align: center; vertical-align: middle;">
|
||||
<input type="checkbox" id="select-all-checkbox" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</th> <!-- Header checkbox added here -->
|
||||
<th style="padding: 8px; text-align: left;">Model</th>
|
||||
<th style="padding: 8px; text-align: center;">Percent correct</th>
|
||||
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
|
||||
<th style="padding: 8px; text-align: left;">Command</th>
|
||||
<th style="padding: 8px; text-align: center;">Edit format</th>
|
||||
<th style="padding: 8px; text-align: center;">Cost</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Percent correct</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Cost</th>
|
||||
<th style="padding: 8px; text-align: left;" class="col-command">Command</th>
|
||||
<th style="padding: 8px; text-align: center; width: 10%" class="col-conform">Correct edit format</th>
|
||||
<th style="padding: 8px; text-align: left; width: 10%" class="col-edit-format">Edit Format</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% assign max_cost = 0 %}
|
||||
{% for row in site.data.polyglot_leaderboard %}
|
||||
{% if row.total_cost > max_cost %}
|
||||
{% assign max_cost = row.total_cost %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if max_cost == 0 %}{% assign max_cost = 1 %}{% endif %}
|
||||
{% assign edit_sorted = site.data.polyglot_leaderboard | sort: 'pass_rate_2' | reverse %}
|
||||
{% for row in edit_sorted %}
|
||||
<tr style="border-bottom: 1px solid #ddd;">
|
||||
<td style="padding: 8px;">{{ row.model }}</td>
|
||||
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
|
||||
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
|
||||
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
|
||||
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
|
||||
<td style="padding: 8px; text-align: center;">{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}</td>
|
||||
{% for row in edit_sorted %} {% comment %} Add loop index for unique IDs {% endcomment %}
|
||||
{% assign row_index = forloop.index0 %}
|
||||
<tr id="main-row-{{ row_index }}">
|
||||
<td style="padding: 8px; text-align: center; vertical-align: middle;">
|
||||
<button class="toggle-details" data-target="details-{{ row_index }}" style="background: none; border: none; cursor: pointer; font-size: 16px; padding: 0; vertical-align: middle;">▶</button>
|
||||
<input type="checkbox" class="row-selector" data-row-index="{{ row_index }}" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</td>
|
||||
<td style="padding: 8px;"><span>{{ row.model }}</span></td>
|
||||
<td class="bar-cell">
|
||||
<div class="bar-viz" style="width: {{ row.pass_rate_2 }}%; background-color: rgba(40, 167, 69, 0.3); border-right: 1px solid rgba(40, 167, 69, 0.5);"></div>
|
||||
<span>{{ row.pass_rate_2 }}%</span>
|
||||
</td>
|
||||
<td class="bar-cell cost-bar-cell">
|
||||
{% if row.total_cost > 0 %}
|
||||
<div class="bar-viz cost-bar" data-cost="{{ row.total_cost }}" data-max-cost="{{ max_cost }}" style="width: 0%; background-color: rgba(13, 110, 253, 0.3); border-right: 1px solid rgba(13, 110, 253, 0.5);"></div>
|
||||
{% endif %}
|
||||
{% assign rounded_cost = row.total_cost | times: 1.0 | round: 2 %}
|
||||
<span>{% if row.total_cost == 0 or rounded_cost == 0.00 %}{% else %}${{ rounded_cost }}{% endif %}</span>
|
||||
</td>
|
||||
<td style="padding: 8px;" class="col-command"><span><code>{{ row.command }}</code></span></td>
|
||||
<td style="padding: 8px; text-align: center;" class="col-conform"><span>{{ row.percent_cases_well_formed }}%</span></td>
|
||||
<td style="padding: 8px;" class="col-edit-format"><span>{{ row.edit_format }}</span></td>
|
||||
</tr>
|
||||
<tr class="details-row" id="details-{{ row_index }}" style="display: none; background-color: #f9f9f9;">
|
||||
<td colspan="7" style="padding: 15px; border-bottom: 1px solid #ddd;">
|
||||
<ul style="margin: 0; padding-left: 20px; list-style: none; border-bottom: 1px solid #ddd;">
|
||||
{% for pair in row %}
|
||||
{% if pair[1] != "" and pair[1] != nil %}
|
||||
<li><strong>
|
||||
{% if pair[0] == 'percent_cases_well_formed' %}
|
||||
Percent cases well formed
|
||||
{% else %}
|
||||
{{ pair[0] | replace: '_', ' ' | capitalize }}
|
||||
{% endif %}
|
||||
:</strong>
|
||||
{% if pair[0] == 'command' %}<code>{{ pair[1] }}</code>{% else %}{{ pair[1] }}{% endif %}
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
### Aider polyglot benchmark results
|
||||
|
||||
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
|
||||
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||
<script>
|
||||
{% assign data_source = edit_sorted %}
|
||||
{% assign pass_rate_field = "pass_rate_2" %}
|
||||
{% assign highlight_model = "xxxxxx" %}
|
||||
{% include leaderboard.js %}
|
||||
</script>
|
||||
<style>
|
||||
#leaderboard-title {
|
||||
margin-bottom: 20px; /* Add space below the title */
|
||||
}
|
||||
tr.selected {
|
||||
color: #0056b3;
|
||||
}
|
||||
table {
|
||||
table-layout: fixed;
|
||||
}
|
||||
thead {
|
||||
border-top: 1px solid #ddd; /* Add top border to header */
|
||||
}
|
||||
td, th {
|
||||
border: none; /* Remove internal cell borders */
|
||||
word-wrap: break-word;
|
||||
overflow-wrap: break-word;
|
||||
vertical-align: middle; /* Ensure consistent vertical alignment */
|
||||
}
|
||||
td:nth-child(3), td:nth-child(4) {
|
||||
font-size: 12px;
|
||||
tbody tr {
|
||||
height: 50px; /* Set a minimum height for all data rows */
|
||||
}
|
||||
|
||||
/* Hide command and edit format columns on mobile */
|
||||
td.col-command { /* Command column */
|
||||
font-size: 12px; /* Keep font size adjustment for command column if desired, or remove */
|
||||
}
|
||||
|
||||
/* Hide new columns first on smaller screens */
|
||||
@media screen and (max-width: 991px) {
|
||||
th.col-conform, td.col-conform,
|
||||
th.col-edit-format, td.col-edit-format {
|
||||
display: none;
|
||||
}
|
||||
/* Increase width of Percent correct and Cost columns when others are hidden */
|
||||
th:nth-child(3), td:nth-child(3), /* Percent correct */
|
||||
th:nth-child(4), td:nth-child(4) { /* Cost */
|
||||
width: 33% !important; /* Override inline style */
|
||||
}
|
||||
}
|
||||
|
||||
/* Hide command column on even smaller screens */
|
||||
@media screen and (max-width: 767px) {
|
||||
th:nth-child(4), td:nth-child(4), /* Command column */
|
||||
th:nth-child(5), td:nth-child(5) { /* Edit format column */
|
||||
th.col-command, td.col-command { /* Command column */
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Control Styles --- */
|
||||
#controls-container {
|
||||
margin-bottom: 20px; /* Add some space below controls */
|
||||
}
|
||||
|
||||
#editSearchInput, #view-mode-select {
|
||||
padding: 8px 12px; /* Consistent padding */
|
||||
border: 1px solid #ccc; /* Slightly softer border */
|
||||
border-radius: 4px;
|
||||
font-size: 14px; /* Match table font size */
|
||||
height: 38px; /* Match height */
|
||||
box-sizing: border-box; /* Include padding/border in height */
|
||||
}
|
||||
|
||||
|
||||
.bar-cell {
|
||||
position: relative; /* Positioning context for the bar */
|
||||
padding: 8px;
|
||||
/* text-align: center; Removed */
|
||||
overflow: hidden; /* Prevent bar from overflowing cell boundaries if needed */
|
||||
}
|
||||
.cost-bar-cell {
|
||||
background-image: none; /* Remove default gradient for cost cells */
|
||||
}
|
||||
.percent-tick, .cost-tick {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
transform: translateY(10px);
|
||||
height: 8px; /* Short tick */
|
||||
width: 1px;
|
||||
background-color: rgba(170, 170, 170, 0.5);
|
||||
z-index: 2; /* Above the bar but below the text */
|
||||
}
|
||||
.bar-viz {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 50%; /* Position at the middle of the cell */
|
||||
transform: translateY(-50%); /* Center the bar vertically */
|
||||
z-index: 1; /* Above background, below ticks and text */
|
||||
height: 36px;
|
||||
border-radius: 0 2px 2px 0; /* Slightly rounded end corners */
|
||||
/* Width and colors are set inline via style attribute */
|
||||
}
|
||||
/* Add a tooltip class for showing cost information on hover */
|
||||
.cost-bar-cell:hover .bar-viz[style*="background-image"] {
|
||||
animation: stripe-animation 2s linear infinite;
|
||||
}
|
||||
@keyframes stripe-animation {
|
||||
0% { background-position: 0 0; }
|
||||
100% { background-position: 20px 0; }
|
||||
}
|
||||
.bar-cell span {
|
||||
position: absolute; /* Position relative to the cell */
|
||||
left: 5px; /* Position slightly inside the left edge */
|
||||
top: 50%; /* Center vertically */
|
||||
transform: translateY(-50%); /* Adjust vertical centering */
|
||||
z-index: 3; /* Ensure text is above everything else */
|
||||
background-color: rgba(255, 255, 255, 0.7); /* Semi-transparent white background */
|
||||
padding: 0 4px; /* Add padding around the text */
|
||||
border-radius: 3px; /* Rounded corners for the text background */
|
||||
font-size: 14px; /* Adjust font size for the numbers */
|
||||
}
|
||||
.toggle-details {
|
||||
color: #888; /* Make toggle symbol more subtle */
|
||||
transition: color 0.2s; /* Smooth transition on hover */
|
||||
}
|
||||
|
||||
|
||||
/* Style for selected rows */
|
||||
tr.row-selected > td {
|
||||
background-color: #e7f3ff; /* Example light blue highlight */
|
||||
}
|
||||
|
||||
/* Ensure checkbox is vertically aligned if needed */
|
||||
.row-selector {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
/* Hide rows not matching the filter */
|
||||
tr.hidden-by-mode {
|
||||
display: none !important; /* Use important to override other display styles if necessary */
|
||||
}
|
||||
tr.hidden-by-search {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* --- Mode Toggle Button Styles --- */
|
||||
#view-mode-toggle {
|
||||
height: 38px; /* Match input height */
|
||||
box-sizing: border-box;
|
||||
flex-shrink: 0; /* Prevent toggle from shrinking on small screens */
|
||||
}
|
||||
.mode-button {
|
||||
transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
|
||||
white-space: nowrap; /* Prevent text wrapping */
|
||||
}
|
||||
.mode-button:not(.active) {
|
||||
background-color: #f8f9fa; /* Light grey background */
|
||||
color: #495057; /* Dark grey text */
|
||||
}
|
||||
.mode-button:not(.active):hover {
|
||||
background-color: #e2e6ea; /* Slightly darker grey on hover */
|
||||
}
|
||||
|
||||
/* Style for highlighted rows in view mode */
|
||||
tr.view-highlighted > td {
|
||||
background-color: #fffef5; /* Very light yellow/cream */
|
||||
/* Border moved to specific cell below */
|
||||
}
|
||||
/* Apply border and adjust padding ONLY for the first *visible* cell (Model name) in view mode */
|
||||
tr.view-highlighted > td:nth-child(2) {
|
||||
border-left: 4px solid #ffc107; /* Warning yellow border */
|
||||
/* Original padding is 8px. Subtract border width. */
|
||||
padding-left: 4px;
|
||||
}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
{% include leaderboard_table.js %}
|
||||
</script>
|
||||
|
||||
|
||||
|
||||
<p class="post-date">
|
||||
<p class="post-date" style="margin-top: 20px;">
|
||||
By Paul Gauthier,
|
||||
last updated
|
||||
<!--[[[cog
|
||||
@@ -124,6 +285,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
|
||||
latest_mod_date = max(mod_dates)
|
||||
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
|
||||
]]]-->
|
||||
April 12, 2025.
|
||||
May 08, 2025.
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
@@ -9,8 +9,7 @@ nav_order: 800
|
||||
|
||||
All pricing information is the cost to run the benchmark at the time it was
|
||||
run.
|
||||
Providers change their pricing, and every benchmark run ends up with a slightly
|
||||
different cost.
|
||||
Providers change their pricing and sometimes introduce entirely novel pricing structures.
|
||||
Pricing is provided on a *best efforts* basis, and may not always be current
|
||||
or fully accurate.
|
||||
|
||||
|
||||
@@ -16,9 +16,10 @@ description: Aider can connect to most LLMs for AI pair programming.
|
||||
|
||||
Aider works best with these models, which are skilled at editing code:
|
||||
|
||||
- [Gemini 2.5 Pro](/docs/llms/gemini.html)
|
||||
- [DeepSeek R1 and V3](/docs/llms/deepseek.html)
|
||||
- [Claude 3.7 Sonnet](/docs/llms/anthropic.html)
|
||||
- [OpenAI o1, o3-mini and GPT-4o](/docs/llms/openai.html)
|
||||
- [OpenAI o3, o4-mini and GPT-4.1](/docs/llms/openai.html)
|
||||
|
||||
|
||||
## Free models
|
||||
@@ -26,10 +27,8 @@ Aider works best with these models, which are skilled at editing code:
|
||||
|
||||
Aider works with a number of **free** API providers:
|
||||
|
||||
- Google's [Gemini 1.5 Pro](/docs/llms/gemini.html) works with aider, with
|
||||
code editing capabilities similar to GPT-3.5.
|
||||
- You can use [Llama 3 70B on Groq](/docs/llms/groq.html) which is comparable to GPT-3.5 in code editing performance.
|
||||
- Cohere also offers free API access to their [Command-R+ model](/docs/llms/cohere.html), which works with aider as a *very basic* coding assistant.
|
||||
- [OpenRouter offers free access to many models](https://openrouter.ai/models/?q=free), with limitations on daily usage.
|
||||
- Google's [Gemini 2.5 Pro Exp](/docs/llms/gemini.html) works very well with aider.
|
||||
|
||||
## Local models
|
||||
{: .no_toc }
|
||||
|
||||
@@ -10,21 +10,26 @@ To work with Anthropic's models, you need to provide your
|
||||
either in the `ANTHROPIC_API_KEY` environment variable or
|
||||
via the `--anthropic-api-key` command line switch.
|
||||
|
||||
Aider has some built in shortcuts for the most popular Anthropic models and
|
||||
has been tested and benchmarked to work well with them:
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export ANTHROPIC_API_KEY=<key> # Mac/Linux
|
||||
setx ANTHROPIC_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and Anthropic on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Aider uses Claude 3.7 Sonnet by default
|
||||
aider
|
||||
|
||||
# Claude 3 Opus
|
||||
aider --model claude-3-opus-20240229
|
||||
|
||||
# List models available from Anthropic
|
||||
aider --list-models anthropic/
|
||||
```
|
||||
|
||||
@@ -7,9 +7,13 @@ nav_order: 500
|
||||
|
||||
Aider can connect to the OpenAI models on Azure.
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys and endpoint:
|
||||
|
||||
```
|
||||
# Mac/Linux:
|
||||
export AZURE_API_KEY=<key>
|
||||
export AZURE_API_VERSION=2024-12-01-preview
|
||||
@@ -20,6 +24,13 @@ setx AZURE_API_KEY <key>
|
||||
setx AZURE_API_VERSION 2024-12-01-preview
|
||||
setx AZURE_API_BASE https://myendpt.openai.azure.com
|
||||
# ... restart your shell after setx commands
|
||||
```
|
||||
|
||||
Start working with aider and Azure on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model azure/<your_model_deployment_name>
|
||||
|
||||
|
||||
@@ -6,8 +6,6 @@ nav_order: 560
|
||||
# Amazon Bedrock
|
||||
|
||||
Aider can connect to models provided by Amazon Bedrock.
|
||||
You will need to have an AWS account with access to the Bedrock service.
|
||||
|
||||
To configure Aider to use the Amazon Bedrock API, you need to set up your AWS credentials.
|
||||
This can be done using the AWS CLI or by setting environment variables.
|
||||
|
||||
@@ -37,6 +35,14 @@ feature, you will receive an error message like the following:
|
||||
anthropic.claude-3-7-sonnet-20250219-v1:0 with on-demand throughput isn\xe2\x80\x99t supported. Retry your
|
||||
request with the ID or ARN of an inference profile that contains this model."}'
|
||||
|
||||
## Installation and Configuration
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Next, configure your AWS credentials. This can be done using the AWS CLI or by setting environment variables.
|
||||
|
||||
## AWS CLI Configuration
|
||||
|
||||
If you haven't already, install the [AWS CLI](https://aws.amazon.com/cli/) and configure it with your credentials:
|
||||
@@ -49,7 +55,7 @@ This will prompt you to enter your AWS Access Key ID, Secret Access Key, and def
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Alternatively, you can set the following environment variables:
|
||||
You can set the following environment variables:
|
||||
|
||||
```bash
|
||||
export AWS_REGION=your_preferred_region
|
||||
@@ -75,32 +81,15 @@ $env:AWS_SECRET_ACCESS_KEY = 'your_secret_key'
|
||||
$env:AWS_REGION = 'us-west-2' # Put whichever AWS region that you'd like, that the Bedrock service supports.
|
||||
```
|
||||
|
||||
## Install boto3
|
||||
|
||||
The AWS Bedrock provider requires the `boto3` package in order to function correctly:
|
||||
|
||||
```bash
|
||||
pip install boto3
|
||||
```
|
||||
|
||||
To use aider installed via `pipx` with AWS Bedrock, you must add the `boto3` dependency to aider's virtual environment by running
|
||||
|
||||
```bash
|
||||
pipx inject aider-chat boto3
|
||||
```
|
||||
|
||||
You must install `boto3` dependency to aider's virtual environment installed via one-liner or uv by running
|
||||
|
||||
```bash
|
||||
uv tool run --from aider-chat pip install boto3
|
||||
```
|
||||
|
||||
|
||||
## Running Aider with Bedrock
|
||||
## Get Started
|
||||
|
||||
Once your AWS credentials are set up, you can run Aider with the `--model` command line switch, specifying the Bedrock model you want to use:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0
|
||||
```
|
||||
|
||||
@@ -121,6 +110,20 @@ aider --list-models bedrock/
|
||||
|
||||
Make sure you have access to these models in your AWS account before attempting to use them with Aider.
|
||||
|
||||
## Install boto3
|
||||
You may need to install the `boto3` package.
|
||||
|
||||
```bash
|
||||
# If you installed with aider-install or `uv tool`
|
||||
uv tool run --from aider-chat pip install boto3
|
||||
|
||||
# Or with pipx...
|
||||
pipx inject aider-chat boto3
|
||||
|
||||
# Or with pip
|
||||
pip install -U boto3
|
||||
```
|
||||
|
||||
# More info
|
||||
|
||||
For more information on Amazon Bedrock and its models, refer to the [official AWS documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html).
|
||||
|
||||
@@ -10,13 +10,22 @@ Their Command-R+ model works well with aider
|
||||
as a *very basic* coding assistant.
|
||||
You'll need a [Cohere API key](https://dashboard.cohere.com/welcome/login).
|
||||
|
||||
To use **Command-R+**:
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export COHERE_API_KEY=<key> # Mac/Linux
|
||||
setx COHERE_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and Cohere on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model command-r-plus-08-2024
|
||||
|
||||
|
||||
@@ -9,11 +9,22 @@ Aider can connect to the DeepSeek.com API.
|
||||
To work with DeepSeek's models, you need to set the `DEEPSEEK_API_KEY` environment variable with your [DeepSeek API key](https://platform.deepseek.com/api_keys).
|
||||
The DeepSeek Chat V3 model has a top score on aider's code editing benchmark.
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
export DEEPSEEK_API_KEY=<key> # Mac/Linux
|
||||
setx DEEPSEEK_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and DeepSeek on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Use DeepSeek Chat v3
|
||||
aider --model deepseek/deepseek-chat
|
||||
|
||||
@@ -7,22 +7,43 @@ nav_order: 300
|
||||
|
||||
You'll need a [Gemini API key](https://aistudio.google.com/app/u/2/apikey).
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
First, install aider:
|
||||
|
||||
# You may need to install google-generativeai
|
||||
pip install -U google-generativeai
|
||||
{% include install.md %}
|
||||
|
||||
# Or with pipx...
|
||||
pipx inject aider-chat google-generativeai
|
||||
Then configure your API keys:
|
||||
|
||||
```bash
|
||||
export GEMINI_API_KEY=<key> # Mac/Linux
|
||||
setx GEMINI_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
# You can run the Gemini 2.5 Pro model with:
|
||||
aider --model gemini-2.5-pro
|
||||
Start working with aider and Gemini on your codebase:
|
||||
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# You can run the Gemini 2.5 Pro model with this shortcut:
|
||||
aider --model gemini
|
||||
|
||||
# You can run the Gemini 2.5 Pro Exp for free, with usage limits:
|
||||
aider --model gemini-exp
|
||||
|
||||
# List models available from Gemini
|
||||
aider --list-models gemini/
|
||||
```
|
||||
|
||||
You may need to install the `google-generativeai` package.
|
||||
|
||||
```bash
|
||||
# If you installed with aider-install or `uv tool`
|
||||
uv tool run --from aider-chat pip install google-generativeai
|
||||
|
||||
# Or with pipx...
|
||||
pipx inject aider-chat google-generativeai
|
||||
|
||||
# Or with pip
|
||||
pip install -U google-generativeai
|
||||
```
|
||||
|
||||
@@ -10,13 +10,22 @@ The Llama 3 70B model works
|
||||
well with aider and is comparable to GPT-3.5 in code editing performance.
|
||||
You'll need a [Groq API key](https://console.groq.com/keys).
|
||||
|
||||
To use **Llama3 70B**:
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export GROQ_API_KEY=<key> # Mac/Linux
|
||||
setx GROQ_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and Groq on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model groq/llama3-70b-8192
|
||||
|
||||
|
||||
@@ -5,11 +5,15 @@ nav_order: 400
|
||||
|
||||
# LM Studio
|
||||
|
||||
To use LM Studio:
|
||||
Aider can connect to models served by LM Studio.
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API key and endpoint:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
# Must set a value here even if its a dummy value
|
||||
export LM_STUDIO_API_KEY=dummy-api-key # Mac/Linux
|
||||
setx LM_STUDIO_API_KEY dummy-api-key # Windows, restart shell after setx
|
||||
@@ -17,12 +21,19 @@ setx LM_STUDIO_API_KEY dummy-api-key # Windows, restart shell after setx
|
||||
# LM Studio default server URL is http://localhost:1234/v1
|
||||
export LM_STUDIO_API_BASE=http://localhost:1234/v1 # Mac/Linux
|
||||
setx LM_STUDIO_API_BASE http://localhost:1234/v1 # Windows, restart shell after setx
|
||||
|
||||
aider --model lm_studio/<your-model-name>
|
||||
```
|
||||
|
||||
**Note:** Even though LM Studio doesn't require an API Key out of the box the `LM_STUDIO_API_KEY` must have a dummy value like `dummy-api-key` set or the client request will fail trying to send an empty `Bearer` token.
|
||||
|
||||
Start working with aider and LM Studio on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model lm_studio/<your-model-name>
|
||||
```
|
||||
|
||||
See the [model warnings](warnings.html)
|
||||
section for information on warnings which will occur
|
||||
when working with models that aider is not familiar with.
|
||||
|
||||
@@ -7,6 +7,19 @@ nav_order: 500
|
||||
|
||||
Aider can connect to local Ollama models.
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your Ollama API endpoint (usually the default):
|
||||
|
||||
```bash
|
||||
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
|
||||
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and Ollama on your codebase:
|
||||
|
||||
```
|
||||
# Pull the model
|
||||
ollama pull <model>
|
||||
@@ -14,11 +27,8 @@ ollama pull <model>
|
||||
# Start your ollama server, increasing the context window to 8k tokens
|
||||
OLLAMA_CONTEXT_LENGTH=8192 ollama serve
|
||||
|
||||
# In another terminal window...
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
|
||||
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
|
||||
# In another terminal window, change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model ollama_chat/<model>
|
||||
```
|
||||
|
||||
@@ -7,10 +7,13 @@ nav_order: 500
|
||||
|
||||
Aider can connect to any LLM which is accessible via an OpenAI compatible API endpoint.
|
||||
|
||||
```
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API key and endpoint:
|
||||
|
||||
```
|
||||
# Mac/Linux:
|
||||
export OPENAI_API_BASE=<endpoint>
|
||||
export OPENAI_API_KEY=<key>
|
||||
@@ -19,6 +22,13 @@ export OPENAI_API_KEY=<key>
|
||||
setx OPENAI_API_BASE <endpoint>
|
||||
setx OPENAI_API_KEY <key>
|
||||
# ... restart shell after setx commands
|
||||
```
|
||||
|
||||
Start working with aider and your OpenAI compatible API on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Prefix the model name with openai/
|
||||
aider --model openai/<model-name>
|
||||
|
||||
@@ -10,27 +10,34 @@ To work with OpenAI's models, you need to provide your
|
||||
either in the `OPENAI_API_KEY` environment variable or
|
||||
via the `--api-key openai=<key>` command line switch.
|
||||
|
||||
Aider has some built in shortcuts for the most popular OpenAI models and
|
||||
has been tested and benchmarked to work well with them:
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
export OPENAI_API_KEY=<key> # Mac/Linux
|
||||
setx OPENAI_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and OpenAI on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# o3-mini
|
||||
aider --model o3-mini --api-key openai=<key>
|
||||
aider --model o3-mini
|
||||
|
||||
# o1-mini
|
||||
aider --model o1-mini --api-key openai=<key>
|
||||
aider --model o1-mini
|
||||
|
||||
# GPT-4o
|
||||
aider --model gpt-4o --api-key openai=<key>
|
||||
aider --model gpt-4o
|
||||
|
||||
# List models available from OpenAI
|
||||
aider --list-models openai/
|
||||
|
||||
# You can also store you API key in environment variables (or .env)
|
||||
export OPENAI_API_KEY=<key> # Mac/Linux
|
||||
setx OPENAI_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
You can use `aider --model <model-name>` to use any other OpenAI model.
|
||||
|
||||
@@ -8,11 +8,22 @@ nav_order: 500
|
||||
Aider can connect to [models provided by OpenRouter](https://openrouter.ai/models?o=top-weekly):
|
||||
You'll need an [OpenRouter API key](https://openrouter.ai/keys).
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```
|
||||
export OPENROUTER_API_KEY=<key> # Mac/Linux
|
||||
setx OPENROUTER_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
Start working with aider and OpenRouter on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Or any other open router model
|
||||
aider --model openrouter/<provider>/<model>
|
||||
@@ -23,16 +34,6 @@ aider --list-models openrouter/
|
||||
|
||||
In particular, many aider users access Sonnet via OpenRouter:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export OPENROUTER_API_KEY=<key> # Mac/Linux
|
||||
setx OPENROUTER_API_KEY <key> # Windows, restart shell after setx
|
||||
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet
|
||||
```
|
||||
|
||||
|
||||
{: .tip }
|
||||
If you get errors, check your
|
||||
[OpenRouter privacy settings](https://openrouter.ai/settings/privacy).
|
||||
|
||||
@@ -55,8 +55,8 @@ lines = run(
|
||||
lines = ['- ' + line for line in lines.splitlines(keepends=True)]
|
||||
cog.out(''.join(lines))
|
||||
]]]-->
|
||||
- ALEPHALPHA_API_KEY
|
||||
- ALEPH_ALPHA_API_KEY
|
||||
- ALEPHALPHA_API_KEY
|
||||
- ANTHROPIC_API_KEY
|
||||
- ANYSCALE_API_KEY
|
||||
- AZURE_AI_API_KEY
|
||||
@@ -66,18 +66,19 @@ cog.out(''.join(lines))
|
||||
- CEREBRAS_API_KEY
|
||||
- CLARIFAI_API_KEY
|
||||
- CLOUDFLARE_API_KEY
|
||||
- CO_API_KEY
|
||||
- CODESTRAL_API_KEY
|
||||
- COHERE_API_KEY
|
||||
- CO_API_KEY
|
||||
- DATABRICKS_API_KEY
|
||||
- DEEPINFRA_API_KEY
|
||||
- DEEPSEEK_API_KEY
|
||||
- FIREWORKSAI_API_KEY
|
||||
- FIREWORKS_AI_API_KEY
|
||||
- FIREWORKS_API_KEY
|
||||
- FIREWORKSAI_API_KEY
|
||||
- GEMINI_API_KEY
|
||||
- GROQ_API_KEY
|
||||
- HUGGINGFACE_API_KEY
|
||||
- INFINITY_API_KEY
|
||||
- MARITALK_API_KEY
|
||||
- MISTRAL_API_KEY
|
||||
- NLP_CLOUD_API_KEY
|
||||
|
||||
@@ -13,6 +13,10 @@ or service account with permission to use the Vertex AI API.
|
||||
With your chosen login method, the gcloud CLI should automatically set the
|
||||
`GOOGLE_APPLICATION_CREDENTIALS` environment variable which points to the credentials file.
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
To configure Aider to use the Vertex AI API, you need to set `VERTEXAI_PROJECT` (the GCP project ID)
|
||||
and `VERTEXAI_LOCATION` (the GCP region) [environment variables for Aider](/docs/config/dotenv.html).
|
||||
|
||||
@@ -27,9 +31,12 @@ VERTEXAI_PROJECT=my-project
|
||||
VERTEXAI_LOCATION=us-east5
|
||||
```
|
||||
|
||||
Then you can run aider with the `--model` command line switch, like this:
|
||||
Start working with aider and Vertex AI on your codebase:
|
||||
|
||||
```
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
aider --model vertex_ai/claude-3-5-sonnet@20240620
|
||||
```
|
||||
|
||||
|
||||
@@ -7,18 +7,47 @@ nav_order: 400
|
||||
|
||||
You'll need a [xAI API key](https://console.x.ai.).
|
||||
|
||||
To use xAI:
|
||||
First, install aider:
|
||||
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
{% include install.md %}
|
||||
|
||||
Then configure your API keys:
|
||||
|
||||
```bash
|
||||
export XAI_API_KEY=<key> # Mac/Linux
|
||||
setx XAI_API_KEY <key> # Windows, restart shell after setx
|
||||
```
|
||||
|
||||
aider --model xai/grok-beta
|
||||
Start working with aider and xAI on your codebase:
|
||||
|
||||
```bash
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Grok 3
|
||||
aider --model xai/grok-3-beta
|
||||
|
||||
# Grok 3 fast (faster, more expensive)
|
||||
aider --model xai/grok-3-fast-beta
|
||||
|
||||
# Grok 3 Mini
|
||||
aider --model xai/grok-3-mini-beta
|
||||
|
||||
# Grok 3 Mini fast (faster, more expensive)
|
||||
aider --model xai/grok-3-mini-fast-beta
|
||||
|
||||
# List models available from xAI
|
||||
aider --list-models xai/
|
||||
```
|
||||
|
||||
The Grok 3 Mini models support the `--reasoning-effort` flag.
|
||||
See the [reasoning settings documentation](../config/reasoning.md) for details.
|
||||
Example:
|
||||
|
||||
```bash
|
||||
aider --model xai/grok-3-mini-beta --reasoning-effort high
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@ First, aider will check which
|
||||
[keys you have provided via the environment, config files, or command line arguments](https://aider.chat/docs/config/api-keys.html).
|
||||
Based on the available keys, aider will select the best model to use.
|
||||
|
||||
## OpenRouter
|
||||
|
||||
If you have not provided any keys, aider will offer to help you connect to
|
||||
[OpenRouter](http://openrouter.ai)
|
||||
which provides both free and paid access to most popular LLMs.
|
||||
|
||||
@@ -52,8 +52,8 @@ the script as your linter.
|
||||
# Second attempt will not do anything and exit 0 unless there's a real problem beyond
|
||||
# the code formatting that was completed.
|
||||
|
||||
pre-commit run --files $* >/dev/null \
|
||||
|| pre-commit run --files $*
|
||||
pre-commit run --files "$@" >/dev/null \
|
||||
|| pre-commit run --files "$@"
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
@@ -27,7 +27,7 @@ layout: none
|
||||
<a href="#features">Features</a>
|
||||
<a href="#getting-started">Getting Started</a>
|
||||
<a href="/docs/">Documentation</a>
|
||||
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>
|
||||
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>
|
||||
<a href="https://github.com/Aider-AI/aider">GitHub</a>
|
||||
</div>
|
||||
</nav>
|
||||
@@ -69,11 +69,11 @@ cog.out(text)
|
||||
]]]-->
|
||||
<a href="https://github.com/Aider-AI/aider" class="github-badge badge-stars" title="Total number of GitHub stars the Aider project has received">
|
||||
<span class="badge-label">⭐ GitHub Stars</span>
|
||||
<span class="badge-value">31K</span>
|
||||
<span class="badge-value">33K</span>
|
||||
</a>
|
||||
<a href="https://pypi.org/project/aider-chat/" class="github-badge badge-installs" title="Total number of installations via pip from PyPI">
|
||||
<span class="badge-label">📦 Installs</span>
|
||||
<span class="badge-value">1.9M</span>
|
||||
<span class="badge-value">2.2M</span>
|
||||
</a>
|
||||
<div class="github-badge badge-tokens" title="Number of tokens processed weekly by Aider users">
|
||||
<span class="badge-label">📈 Tokens/week</span>
|
||||
@@ -85,7 +85,7 @@ cog.out(text)
|
||||
</a>
|
||||
<a href="/HISTORY.html" class="github-badge badge-coded" title="Percentage of the new code in Aider's last release written by Aider itself">
|
||||
<span class="badge-label">🔄 Singularity</span>
|
||||
<span class="badge-value">86%</span>
|
||||
<span class="badge-value">92%</span>
|
||||
</a>
|
||||
<!--[[[end]]]-->
|
||||
</div>
|
||||
@@ -268,6 +268,11 @@ cog.out(text)
|
||||
]]]-->
|
||||
<script>
|
||||
const testimonials = [
|
||||
{
|
||||
text: "My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world.",
|
||||
author: "Eric S. Raymond",
|
||||
link: "https://x.com/esrtweet/status/1910809356381413593"
|
||||
},
|
||||
{
|
||||
text: "The best free open source AI coding assistant.",
|
||||
author: "IndyDevDan",
|
||||
@@ -404,7 +409,7 @@ const testimonials = [
|
||||
link: "https://x.com/ccui42/status/1904965344999145698"
|
||||
},
|
||||
{
|
||||
text: "Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
|
||||
text: "Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
|
||||
author: "Reilly Sweetland",
|
||||
link: "https://x.com/rsweetland/status/1904963807237259586"
|
||||
},
|
||||
@@ -412,6 +417,31 @@ const testimonials = [
|
||||
text: "Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot.",
|
||||
author: "autopoietist",
|
||||
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101"
|
||||
},
|
||||
{
|
||||
text: "Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone.",
|
||||
author: "Joshua D Vander Hook",
|
||||
link: "https://x.com/jodavaho/status/1911154899057795218"
|
||||
},
|
||||
{
|
||||
text: "thanks to aider, i have started and finished three personal projects within the last two days",
|
||||
author: "joseph stalzyn",
|
||||
link: "https://x.com/anitaheeder/status/1908338609645904160"
|
||||
},
|
||||
{
|
||||
text: "Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words.",
|
||||
author: "koleok",
|
||||
link: "https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783"
|
||||
},
|
||||
{
|
||||
text: "Aider ... is the tool to benchmark against.",
|
||||
author: "BeetleB",
|
||||
link: "https://news.ycombinator.com/item?id=43930201"
|
||||
},
|
||||
{
|
||||
text: "aider is really cool",
|
||||
author: "kache (@yacineMTB)",
|
||||
link: "https://x.com/yacineMTB/status/1911224442430124387"
|
||||
}
|
||||
];
|
||||
</script>
|
||||
@@ -611,7 +641,7 @@ const testimonials = [
|
||||
<ul class="info-list">
|
||||
<li><a href="/docs/leaderboards/">LLM Leaderboards</a></li>
|
||||
<li><a href="https://github.com/Aider-AI/aider">GitHub Repository</a></li>
|
||||
<li><a href="https://discord.gg/Tv2uQnR88V">Discord Community</a></li>
|
||||
<li><a href="https://discord.gg/Y7X7bhMQFV">Discord Community</a></li>
|
||||
<li><a href="/blog/">Blog</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
@@ -624,7 +654,7 @@ const testimonials = [
|
||||
<div class="footer-links">
|
||||
<a href="/docs/install.html">Documentation</a>
|
||||
<a href="https://github.com/Aider-AI/aider">GitHub</a>
|
||||
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>
|
||||
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>
|
||||
<a href="/blog/">Blog</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -206,6 +206,12 @@ def main(
|
||||
read_model_settings: str = typer.Option(
|
||||
None, "--read-model-settings", help="Load aider model settings from YAML file"
|
||||
),
|
||||
reasoning_effort: Optional[str] = typer.Option(
|
||||
None, "--reasoning-effort", help="Set reasoning effort for models that support it"
|
||||
),
|
||||
thinking_tokens: Optional[int] = typer.Option(
|
||||
None, "--thinking-tokens", help="Set thinking tokens for models that support it"
|
||||
),
|
||||
exercises_dir: str = typer.Option(
|
||||
EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files"
|
||||
),
|
||||
@@ -362,6 +368,8 @@ def main(
|
||||
editor_edit_format,
|
||||
num_ctx,
|
||||
sleep,
|
||||
reasoning_effort,
|
||||
thinking_tokens,
|
||||
)
|
||||
|
||||
all_results.append(results)
|
||||
@@ -384,6 +392,10 @@ def main(
|
||||
replay,
|
||||
editor_model,
|
||||
editor_edit_format,
|
||||
num_ctx,
|
||||
sleep,
|
||||
reasoning_effort,
|
||||
thinking_tokens,
|
||||
)
|
||||
all_results = run_test_threaded.gather(tqdm=True)
|
||||
|
||||
@@ -480,7 +492,11 @@ def summarize_results(dirname, stats_languages=None):
|
||||
res.syntax_errors = 0
|
||||
res.indentation_errors = 0
|
||||
res.lazy_comments = 0
|
||||
res.prompt_tokens = 0
|
||||
res.completion_tokens = 0
|
||||
|
||||
res.reasoning_effort = None
|
||||
res.thinking_tokens = None
|
||||
variants = defaultdict(set)
|
||||
|
||||
for results in all_results:
|
||||
@@ -509,6 +525,12 @@ def summarize_results(dirname, stats_languages=None):
|
||||
res.syntax_errors += results.get("syntax_errors", 0)
|
||||
res.indentation_errors += results.get("indentation_errors", 0)
|
||||
|
||||
res.prompt_tokens += results.get("prompt_tokens", 0)
|
||||
res.completion_tokens += results.get("completion_tokens", 0)
|
||||
|
||||
res.reasoning_effort = results.get("reasoning_effort")
|
||||
res.thinking_tokens = results.get("thinking_tokens")
|
||||
|
||||
for key in "model edit_format commit_hash editor_model editor_edit_format".split():
|
||||
val = results.get(key)
|
||||
if val:
|
||||
@@ -552,6 +574,11 @@ def summarize_results(dirname, stats_languages=None):
|
||||
setattr(res, key, val)
|
||||
console.print(f" {key}: {val}", style=style)
|
||||
|
||||
if res.reasoning_effort is not None:
|
||||
print(f" reasoning_effort: {res.reasoning_effort}")
|
||||
if res.thinking_tokens is not None:
|
||||
print(f" thinking_tokens: {res.thinking_tokens}")
|
||||
|
||||
for i in range(tries):
|
||||
print(f" pass_rate_{i + 1}: {percents[i]:.1f}")
|
||||
for i in range(tries):
|
||||
@@ -568,6 +595,8 @@ def summarize_results(dirname, stats_languages=None):
|
||||
show("syntax_errors")
|
||||
show("indentation_errors")
|
||||
show("exhausted_context_windows")
|
||||
show("prompt_tokens", red=None)
|
||||
show("completion_tokens", red=None)
|
||||
show("test_timeouts")
|
||||
print(f" total_tests: {res.total_tests}")
|
||||
|
||||
@@ -637,15 +666,14 @@ def get_replayed_content(replay_dname, test_dname):
|
||||
def run_test(original_dname, testdir, *args, **kwargs):
|
||||
try:
|
||||
return run_test_real(original_dname, testdir, *args, **kwargs)
|
||||
except Exception as err:
|
||||
except Exception:
|
||||
print("=" * 40)
|
||||
print("Test failed")
|
||||
print(err)
|
||||
traceback.print_exc()
|
||||
|
||||
testdir = Path(testdir)
|
||||
results_fname = testdir / ".aider.results.json"
|
||||
results_fname.write_text(json.dumps(dict(exception=str(err))))
|
||||
results_fname.write_text(json.dumps(dict(exception=traceback.format_exc())))
|
||||
|
||||
|
||||
def run_test_real(
|
||||
@@ -663,6 +691,8 @@ def run_test_real(
|
||||
editor_edit_format,
|
||||
num_ctx=None,
|
||||
sleep=0,
|
||||
reasoning_effort: Optional[str] = None,
|
||||
thinking_tokens: Optional[int] = None,
|
||||
read_model_settings=None,
|
||||
):
|
||||
if not os.path.isdir(testdir):
|
||||
@@ -754,7 +784,7 @@ def run_test_real(
|
||||
instructions += prompts.instructions_addendum.format(file_list=file_list)
|
||||
|
||||
io = InputOutput(
|
||||
pretty=True,
|
||||
pretty=False,
|
||||
yes=True,
|
||||
chat_history_file=history_fname,
|
||||
)
|
||||
@@ -767,8 +797,15 @@ def run_test_real(
|
||||
weak_model=weak_model_name,
|
||||
editor_model=editor_model,
|
||||
editor_edit_format=editor_edit_format,
|
||||
verbose=verbose,
|
||||
)
|
||||
|
||||
if reasoning_effort is not None:
|
||||
main_model.set_reasoning_effort(reasoning_effort)
|
||||
|
||||
if thinking_tokens is not None:
|
||||
main_model.set_thinking_tokens(thinking_tokens)
|
||||
|
||||
dump(main_model.max_chat_history_tokens)
|
||||
|
||||
if num_ctx:
|
||||
@@ -919,6 +956,10 @@ def run_test_real(
|
||||
syntax_errors=syntax_errors,
|
||||
indentation_errors=indentation_errors,
|
||||
lazy_comments=lazy_comments, # Add the count of pattern matches to the results
|
||||
reasoning_effort=reasoning_effort,
|
||||
prompt_tokens=coder.total_tokens_sent,
|
||||
completion_tokens=coder.total_tokens_received,
|
||||
thinking_tokens=thinking_tokens,
|
||||
chat_hashes=list(
|
||||
zip(
|
||||
coder.chat_completion_call_hashes,
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
docker run \
|
||||
-it --rm \
|
||||
--memory=25g \
|
||||
--memory-swap=25g \
|
||||
--memory=12g \
|
||||
--memory-swap=12g \
|
||||
--add-host=host.docker.internal:host-gateway \
|
||||
-v `pwd`:/aider \
|
||||
-v `pwd`/tmp.benchmarks/.:/benchmarks \
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user