mirror of
https://github.com/Aider-AI/aider
synced 2026-05-05 06:32:04 +02:00
Compare commits
1630 Commits
v0.77.1.de
...
v0.83.3.de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
03a489ea35 | ||
|
|
81389b87d7 | ||
|
|
0d8ff295d6 | ||
|
|
6176a8dee3 | ||
|
|
299e6ae7a2 | ||
|
|
0b1d49d630 | ||
|
|
037a36edba | ||
|
|
66bc9cf292 | ||
|
|
2b9e669930 | ||
|
|
cb88b7e62a | ||
|
|
4e9943f2aa | ||
|
|
9f5018e89e | ||
|
|
3caab85931 | ||
|
|
756372809e | ||
|
|
6aa05ab11c | ||
|
|
9cf373039e | ||
|
|
bc1272f029 | ||
|
|
0049e78250 | ||
|
|
56b45ce1d3 | ||
|
|
bdd67eb229 | ||
|
|
57020a2d5e | ||
|
|
6b9045a2a2 | ||
|
|
5f24a0013a | ||
|
|
b79052501d | ||
|
|
9e0d7d9c46 | ||
|
|
a53ab7d937 | ||
|
|
c055602c6f | ||
|
|
170e8fc9a1 | ||
|
|
ee177054b8 | ||
|
|
f018b5fab5 | ||
|
|
5a29ba03dc | ||
|
|
035d99d3d3 | ||
|
|
702eff1033 | ||
|
|
97f3885357 | ||
|
|
f8653613bc | ||
|
|
b1d47c47d9 | ||
|
|
2c4a126093 | ||
|
|
cdd1546243 | ||
|
|
6a3bb0f4ec | ||
|
|
24c0fbd326 | ||
|
|
7b9eae117f | ||
|
|
512b4d891b | ||
|
|
a6b0f43dce | ||
|
|
e8d9ae9a1f | ||
|
|
2ab0074915 | ||
|
|
225e01717c | ||
|
|
4d39b88110 | ||
|
|
5052150e2e | ||
|
|
d8fbd9cbd3 | ||
|
|
53cda2cc10 | ||
|
|
543e5570ae | ||
|
|
62c7e15a36 | ||
|
|
17a2773a22 | ||
|
|
b8758ca791 | ||
|
|
bf9522a2fb | ||
|
|
ddc8621d6e | ||
|
|
7875de078a | ||
|
|
ea1189b8ec | ||
|
|
1127b8b559 | ||
|
|
64f218a06e | ||
|
|
efde8e867e | ||
|
|
f815f0377e | ||
|
|
883aa9e03d | ||
|
|
2a410fab81 | ||
|
|
34409311a3 | ||
|
|
97379aa02f | ||
|
|
ee4e9c9711 | ||
|
|
7d3c817664 | ||
|
|
8c755bf032 | ||
|
|
0b112e948f | ||
|
|
c11d21a230 | ||
|
|
a9cb1a9d61 | ||
|
|
43cd0164e0 | ||
|
|
49b3f85cc5 | ||
|
|
3daf7d4df3 | ||
|
|
3dcb23c193 | ||
|
|
cad31b638b | ||
|
|
7fbe0d25f5 | ||
|
|
637a31e083 | ||
|
|
f928ffc3fc | ||
|
|
23cb604e6e | ||
|
|
09880ee8f4 | ||
|
|
425fb6d7a8 | ||
|
|
28d87767cd | ||
|
|
ed262b8b06 | ||
|
|
7f30320566 | ||
|
|
9d74e8c730 | ||
|
|
1b2eeaff56 | ||
|
|
0632c7a90f | ||
|
|
c806f18698 | ||
|
|
91d7fbd659 | ||
|
|
fcc85a7ae6 | ||
|
|
dbfba029af | ||
|
|
88fba5f20b | ||
|
|
f7a073961c | ||
|
|
f8c154edce | ||
|
|
c6ad5c8cd2 | ||
|
|
af9ae849bd | ||
|
|
64b4d13880 | ||
|
|
6620141420 | ||
|
|
d79bc2c05b | ||
|
|
9978f6c51e | ||
|
|
5be642fbec | ||
|
|
9f1ef3f49f | ||
|
|
a3562d1d62 | ||
|
|
4e608dbd77 | ||
|
|
3f49acf390 | ||
|
|
77deb35022 | ||
|
|
1a7960810c | ||
|
|
766a41d5de | ||
|
|
df967e4b41 | ||
|
|
781ed90653 | ||
|
|
b9885bb76d | ||
|
|
11480f6110 | ||
|
|
2bc9386876 | ||
|
|
04cbe87caa | ||
|
|
4c959f4542 | ||
|
|
8652fcf86e | ||
|
|
23714d7db6 | ||
|
|
81b86441fd | ||
|
|
edb3bf84cc | ||
|
|
4d5852a30e | ||
|
|
7a5877ea50 | ||
|
|
52ae22bcf8 | ||
|
|
4fb2d78011 | ||
|
|
c93c22ec98 | ||
|
|
a26a3145ba | ||
|
|
055a3d795a | ||
|
|
2d34b738bc | ||
|
|
292aa9bded | ||
|
|
4e86a82a08 | ||
|
|
784ac79da1 | ||
|
|
647f556582 | ||
|
|
aad6838e15 | ||
|
|
95cc362c07 | ||
|
|
9ef506dc25 | ||
|
|
b236e0c801 | ||
|
|
c706663841 | ||
|
|
d7e091f315 | ||
|
|
37601eb4b7 | ||
|
|
a22772b388 | ||
|
|
befff1f22e | ||
|
|
0864a7ca76 | ||
|
|
01592afac3 | ||
|
|
3a5a46253d | ||
|
|
5bb891b2bb | ||
|
|
18f702b95a | ||
|
|
e6a35be5b7 | ||
|
|
6351964bcd | ||
|
|
ede3061fe0 | ||
|
|
f1121e3b7c | ||
|
|
a1cb86dca3 | ||
|
|
cf1d58745e | ||
|
|
98dc8e5d57 | ||
|
|
21a05ead4e | ||
|
|
80f78ee85d | ||
|
|
540b2519c2 | ||
|
|
d3931f67ca | ||
|
|
b6a32d8682 | ||
|
|
023e939798 | ||
|
|
38e7f04e60 | ||
|
|
b40baaceea | ||
|
|
ff549cf9ba | ||
|
|
2c1685bb36 | ||
|
|
2a61494442 | ||
|
|
0af5563e77 | ||
|
|
c147571b18 | ||
|
|
311981f4e5 | ||
|
|
79923c954b | ||
|
|
0b4430f228 | ||
|
|
ee9ad75509 | ||
|
|
920b20b17d | ||
|
|
9297ee982d | ||
|
|
1d5c3c3a2b | ||
|
|
217b45ae88 | ||
|
|
1f6f480864 | ||
|
|
40a5a88d56 | ||
|
|
30097ab859 | ||
|
|
09acfc8147 | ||
|
|
a2ecc5883b | ||
|
|
d127d45669 | ||
|
|
2ebb2103b8 | ||
|
|
c3d4fdb4c1 | ||
|
|
e1ab9cc0ab | ||
|
|
15317a9f4b | ||
|
|
62dc55dd77 | ||
|
|
20faadcbd9 | ||
|
|
8f0fa6684d | ||
|
|
7a3805d39f | ||
|
|
4709a539c6 | ||
|
|
8172125931 | ||
|
|
b8f9d459fb | ||
|
|
96bc57167f | ||
|
|
606e27a337 | ||
|
|
1d7c56b8c5 | ||
|
|
6e1327f66d | ||
|
|
82f33c1220 | ||
|
|
cd7567fcf6 | ||
|
|
e4274aa4f6 | ||
|
|
acd7309b78 | ||
|
|
d5ea078f24 | ||
|
|
8776830306 | ||
|
|
43dd9ef8a5 | ||
|
|
f047b2928b | ||
|
|
d89d500eab | ||
|
|
35fe1df499 | ||
|
|
d32d0b7909 | ||
|
|
0a5c1960b3 | ||
|
|
eef0051b93 | ||
|
|
b5cde63b37 | ||
|
|
043c42b2b4 | ||
|
|
758fa6f67e | ||
|
|
c2fce2699e | ||
|
|
328584e5f4 | ||
|
|
f12395f4d3 | ||
|
|
024c3ed46e | ||
|
|
3ed897c665 | ||
|
|
bfcff84b28 | ||
|
|
4124cee722 | ||
|
|
d18a9f32bc | ||
|
|
aef3863c4a | ||
|
|
f31128706d | ||
|
|
1307215b8f | ||
|
|
cb380b423e | ||
|
|
86d338c811 | ||
|
|
dd3ef07881 | ||
|
|
69f14ace01 | ||
|
|
08220f598c | ||
|
|
9badb711ff | ||
|
|
90b5f897f9 | ||
|
|
4a14aeb7d9 | ||
|
|
fef0f1fa3a | ||
|
|
a39cec8e1d | ||
|
|
c89ac40f56 | ||
|
|
114a0e5ab9 | ||
|
|
371c82e5bb | ||
|
|
71338a679e | ||
|
|
aeaf259021 | ||
|
|
bdec02e290 | ||
|
|
5090f28151 | ||
|
|
a98b531bcc | ||
|
|
8727ffbe68 | ||
|
|
e7de5382fb | ||
|
|
8956eef339 | ||
|
|
0c236d0035 | ||
|
|
aaacee5d4d | ||
|
|
da00455388 | ||
|
|
03acee1ed2 | ||
|
|
4ab8faf21e | ||
|
|
2f45023f59 | ||
|
|
1d2818a064 | ||
|
|
582da0ee44 | ||
|
|
592dea0f8c | ||
|
|
dd8db78680 | ||
|
|
23ce877bd2 | ||
|
|
8bb971c15d | ||
|
|
fe20e528b0 | ||
|
|
8dd8fb52f4 | ||
|
|
af9fcdcfa8 | ||
|
|
9990965e82 | ||
|
|
5b52063446 | ||
|
|
b2e3d47d14 | ||
|
|
67cbda3bd5 | ||
|
|
84d6cf937b | ||
|
|
765ac2a14d | ||
|
|
1167700a53 | ||
|
|
c6954f9972 | ||
|
|
c72e5fcc5e | ||
|
|
4ec075d290 | ||
|
|
60a1a3a8c8 | ||
|
|
bf38754846 | ||
|
|
94197cb25d | ||
|
|
cbaaf96324 | ||
|
|
96899a140b | ||
|
|
c756b080e8 | ||
|
|
a61fb1e23b | ||
|
|
9660d95ceb | ||
|
|
eabc98b64a | ||
|
|
5ff3d1a0c5 | ||
|
|
b6587de389 | ||
|
|
4d9f4e0202 | ||
|
|
e9d2f527a1 | ||
|
|
98e6939c48 | ||
|
|
e3911f8621 | ||
|
|
efd5f79368 | ||
|
|
8e84b5c0b1 | ||
|
|
c1dc473ed8 | ||
|
|
3b08327792 | ||
|
|
8b08c5a5f3 | ||
|
|
eedea62ac1 | ||
|
|
146f62abcc | ||
|
|
1c854f2e83 | ||
|
|
d27bb56cf3 | ||
|
|
28aeb17cbe | ||
|
|
b3cf318c5e | ||
|
|
4acf65fcfb | ||
|
|
4c871c6f50 | ||
|
|
d56ce3ae56 | ||
|
|
5225d7f50c | ||
|
|
41392a1c6e | ||
|
|
ca714157b8 | ||
|
|
9dd2d2a3b1 | ||
|
|
e53f2f7674 | ||
|
|
edbfec0ce4 | ||
|
|
d294e8cd49 | ||
|
|
2229bb9817 | ||
|
|
7ef7b6e042 | ||
|
|
8159cbf7d3 | ||
|
|
c23e609902 | ||
|
|
2d9ea25273 | ||
|
|
7773bbc908 | ||
|
|
72476f0967 | ||
|
|
a9883ccc25 | ||
|
|
3b9b93a8a4 | ||
|
|
f90b7bfb09 | ||
|
|
edc941eb9e | ||
|
|
5e7ef6c50e | ||
|
|
fdc7be1318 | ||
|
|
f00c1bf61b | ||
|
|
09030de0b5 | ||
|
|
bdba0ca1c5 | ||
|
|
e17c7d938c | ||
|
|
433f2908a0 | ||
|
|
9fa5f5ace1 | ||
|
|
849a379a8c | ||
|
|
e205629a94 | ||
|
|
9351f37935 | ||
|
|
7d185bb710 | ||
|
|
07759813ed | ||
|
|
591d294052 | ||
|
|
df1a0c5b8d | ||
|
|
e743394537 | ||
|
|
22f140ac05 | ||
|
|
25a303935c | ||
|
|
3bf20d4f7a | ||
|
|
45413ce815 | ||
|
|
8ffe466257 | ||
|
|
d9aa3cb2d4 | ||
|
|
5251a2452c | ||
|
|
6df2c1595f | ||
|
|
c56e4a08d3 | ||
|
|
80515b69c1 | ||
|
|
303645cffa | ||
|
|
b3d32f65d3 | ||
|
|
7c0aac7454 | ||
|
|
7719eae023 | ||
|
|
5e210c700d | ||
|
|
c6ce871700 | ||
|
|
f28504a2eb | ||
|
|
48733a315b | ||
|
|
16fbff8de1 | ||
|
|
bbab0cea5e | ||
|
|
19de93ae39 | ||
|
|
230e5065c1 | ||
|
|
c94340d493 | ||
|
|
ac1ff231e0 | ||
|
|
5423ffe518 | ||
|
|
ba4d613cbc | ||
|
|
ab11118c8a | ||
|
|
3ca3f39f1d | ||
|
|
8c3f167e8c | ||
|
|
1a4d3927e7 | ||
|
|
20a29e5cd1 | ||
|
|
51e0fff822 | ||
|
|
13b3e75d0e | ||
|
|
de28178369 | ||
|
|
2f38cd184c | ||
|
|
d8caa76bc8 | ||
|
|
506c3c928e | ||
|
|
48ac1de8d3 | ||
|
|
ebfce5b0f2 | ||
|
|
58f4db4e52 | ||
|
|
ba2c4d1eb7 | ||
|
|
6656b5d973 | ||
|
|
b4673fdc85 | ||
|
|
ce1266be68 | ||
|
|
226108d05d | ||
|
|
b2d541f1eb | ||
|
|
758020c574 | ||
|
|
876569613b | ||
|
|
82b26daf37 | ||
|
|
be44b65095 | ||
|
|
8596f0d4a3 | ||
|
|
19a94e5f15 | ||
|
|
7bde345b83 | ||
|
|
d45a5747ea | ||
|
|
e560ab61b6 | ||
|
|
84c3ac93ef | ||
|
|
7a50b7779a | ||
|
|
328a3c3178 | ||
|
|
21fa54d792 | ||
|
|
ec7ac60cfc | ||
|
|
c2d8d5dc82 | ||
|
|
20a7e3552c | ||
|
|
888168f044 | ||
|
|
851642a1bd | ||
|
|
f7bdebfba9 | ||
|
|
a4d3222108 | ||
|
|
f1caab9de0 | ||
|
|
c08336fdb0 | ||
|
|
541b496d09 | ||
|
|
622bf349c5 | ||
|
|
05eaf82b36 | ||
|
|
5c8150fd16 | ||
|
|
ec9327dcb4 | ||
|
|
8e689d35af | ||
|
|
50fd544070 | ||
|
|
4f8bd2e06d | ||
|
|
6f1b6f5f31 | ||
|
|
bdfda399cb | ||
|
|
a08ffc3513 | ||
|
|
21beee2fe1 | ||
|
|
a564f94bf3 | ||
|
|
9e54898866 | ||
|
|
739e01da95 | ||
|
|
3e0af2cc84 | ||
|
|
9ff13740f2 | ||
|
|
00e5c33444 | ||
|
|
57abaf7500 | ||
|
|
ed14be4e70 | ||
|
|
80909e17c7 | ||
|
|
52697ea884 | ||
|
|
9f01c8d0d6 | ||
|
|
e91d7e74ae | ||
|
|
20ca0463ea | ||
|
|
5e40f469bf | ||
|
|
7f28d63c33 | ||
|
|
bb1fa24971 | ||
|
|
ffbbaa06d7 | ||
|
|
14e1b96f05 | ||
|
|
d8c781b66b | ||
|
|
2fbec8545c | ||
|
|
b66901fc75 | ||
|
|
d569bca520 | ||
|
|
efbefc669f | ||
|
|
24805ff85d | ||
|
|
8b917d5716 | ||
|
|
3502f335ec | ||
|
|
758979e4f3 | ||
|
|
8b5fc801da | ||
|
|
f5c4214c93 | ||
|
|
f106993cd1 | ||
|
|
270e84287a | ||
|
|
daec7cf3f4 | ||
|
|
bb42d1e9a5 | ||
|
|
23f182aab3 | ||
|
|
119fbc995c | ||
|
|
3081f49179 | ||
|
|
8cf1874453 | ||
|
|
31b4bd5bcf | ||
|
|
71d1591cc1 | ||
|
|
134a2d60fe | ||
|
|
152b8912ae | ||
|
|
36f23c101d | ||
|
|
0e40510295 | ||
|
|
db0d0768d7 | ||
|
|
c68cade9f2 | ||
|
|
14928727eb | ||
|
|
67b9345929 | ||
|
|
dae1a376a2 | ||
|
|
1e359f1dcf | ||
|
|
1c54857422 | ||
|
|
0f78a0ac5c | ||
|
|
4e1e77890b | ||
|
|
5573cdfba1 | ||
|
|
14028d3758 | ||
|
|
3ab673b398 | ||
|
|
861f51f6c3 | ||
|
|
64f5d0d388 | ||
|
|
9059af8d5f | ||
|
|
c3a543b99d | ||
|
|
c85cd783e5 | ||
|
|
af2d241c99 | ||
|
|
30839a5273 | ||
|
|
8baa99b7ef | ||
|
|
d1e5572343 | ||
|
|
96aa648e17 | ||
|
|
1ae5f23dc8 | ||
|
|
f565f72679 | ||
|
|
78e76648d0 | ||
|
|
8e1e2210dd | ||
|
|
e8c43c36d7 | ||
|
|
97e2a7bae0 | ||
|
|
6b75a578ac | ||
|
|
8b9238ebc9 | ||
|
|
8cc8027b40 | ||
|
|
ffb743e108 | ||
|
|
0f805752d3 | ||
|
|
4e9de4d51b | ||
|
|
a4e9539040 | ||
|
|
0c383dfb11 | ||
|
|
11d2b7ca98 | ||
|
|
e38be2f280 | ||
|
|
febdd3c0d0 | ||
|
|
0b08ca64a8 | ||
|
|
0f8e7fbd34 | ||
|
|
1a080ba71c | ||
|
|
1622531d85 | ||
|
|
7d0a9c7233 | ||
|
|
53a64c88ad | ||
|
|
27b51d51d8 | ||
|
|
bec35e0538 | ||
|
|
f65e6a3bb1 | ||
|
|
fd94f1a5f9 | ||
|
|
09fc037d4d | ||
|
|
cf0e6dac61 | ||
|
|
3b10e3bcb5 | ||
|
|
4c17784444 | ||
|
|
6616f0886d | ||
|
|
dcafab2764 | ||
|
|
3b6146301f | ||
|
|
42e09b3c7f | ||
|
|
73da42bee6 | ||
|
|
415b1cf5f0 | ||
|
|
c011285904 | ||
|
|
4314b4fefb | ||
|
|
d686f6844d | ||
|
|
65a0e5f771 | ||
|
|
5ca6d8ce67 | ||
|
|
688c2b9ee5 | ||
|
|
271f39505c | ||
|
|
3e8367ea3b | ||
|
|
67a1e52259 | ||
|
|
7561687b7b | ||
|
|
93fc7acbe3 | ||
|
|
72dc67950f | ||
|
|
e2bebd1d51 | ||
|
|
03560d3386 | ||
|
|
a3a3303a83 | ||
|
|
232a6f87d2 | ||
|
|
ab71ea0a65 | ||
|
|
1302224f39 | ||
|
|
733bf0dcdf | ||
|
|
4ed48178a9 | ||
|
|
8cffb975d9 | ||
|
|
97b18797a4 | ||
|
|
579794b265 | ||
|
|
bea746595e | ||
|
|
87711b048a | ||
|
|
0b468ebd85 | ||
|
|
aefc250e30 | ||
|
|
4a86fea86b | ||
|
|
fe6e2e1ea7 | ||
|
|
09d90b9b70 | ||
|
|
14eb7b46a2 | ||
|
|
66077fe3a4 | ||
|
|
d50cf806db | ||
|
|
95edae9bd1 | ||
|
|
a6c35305ed | ||
|
|
b382005a4c | ||
|
|
a71b90bdd6 | ||
|
|
d4a68c80bc | ||
|
|
fcf44cbebe | ||
|
|
51d8cb063a | ||
|
|
cdc86565cc | ||
|
|
1c54907b30 | ||
|
|
b6d4246e18 | ||
|
|
cc1a984c7e | ||
|
|
52d39657ab | ||
|
|
363ec82a48 | ||
|
|
f164b0e3eb | ||
|
|
3aaf7a69ec | ||
|
|
6d2828bc3c | ||
|
|
dd6e2051a8 | ||
|
|
ef440972bb | ||
|
|
da96888669 | ||
|
|
75639059e1 | ||
|
|
0a15dd311a | ||
|
|
434a1c6710 | ||
|
|
f961eecab6 | ||
|
|
d33a571f7d | ||
|
|
ea1239efef | ||
|
|
19c7c7a9dc | ||
|
|
49e4af4fab | ||
|
|
3e27c1bb17 | ||
|
|
0f8d196741 | ||
|
|
4c45f0e44b | ||
|
|
e39eef1ed7 | ||
|
|
c9c7aea1c4 | ||
|
|
18ff9eb2b4 | ||
|
|
b2f3d2cd84 | ||
|
|
5e0832cb8b | ||
|
|
a14c0ccac6 | ||
|
|
278f90acdd | ||
|
|
8e8b18e9a9 | ||
|
|
a277d74869 | ||
|
|
7ca3b6455d | ||
|
|
5ec6f69037 | ||
|
|
39962ba5eb | ||
|
|
51fa1f9abd | ||
|
|
47af5d463c | ||
|
|
33f0b0b41c | ||
|
|
48038b1f5e | ||
|
|
323698d387 | ||
|
|
1f702beb74 | ||
|
|
7d34c28af1 | ||
|
|
d26be77010 | ||
|
|
3b96d1bd57 | ||
|
|
48fd0e71d5 | ||
|
|
bcb35ccf44 | ||
|
|
a663ff7fa8 | ||
|
|
813d34a0e9 | ||
|
|
a4074a13c4 | ||
|
|
249f329b07 | ||
|
|
cf160a8f84 | ||
|
|
4db963182d | ||
|
|
199b59fdb9 | ||
|
|
2d09bfa0f3 | ||
|
|
729285e8a2 | ||
|
|
afd17bd96a | ||
|
|
380d8570dc | ||
|
|
e711eaa810 | ||
|
|
7dfdc2094e | ||
|
|
838646ac5b | ||
|
|
507f07575b | ||
|
|
f5e8808770 | ||
|
|
ae5b6e88a5 | ||
|
|
b45186dde0 | ||
|
|
38be8aa0da | ||
|
|
816d4ba206 | ||
|
|
ede59e4d2a | ||
|
|
ce0931a3c8 | ||
|
|
a44e148818 | ||
|
|
71115c6558 | ||
|
|
8ae837e98b | ||
|
|
9518193d0a | ||
|
|
60a2b799e6 | ||
|
|
1d42690824 | ||
|
|
3f94fd5e4e | ||
|
|
165e237be7 | ||
|
|
38dfd6f4f9 | ||
|
|
5851d66174 | ||
|
|
6a970c3515 | ||
|
|
9e91e8f1b2 | ||
|
|
3e1bc77bf2 | ||
|
|
d991cb6721 | ||
|
|
37a252748a | ||
|
|
5664b5b195 | ||
|
|
278a596bfa | ||
|
|
ea74f31b3e | ||
|
|
9d7dc00f25 | ||
|
|
882e7b6716 | ||
|
|
8ba29ee8e6 | ||
|
|
dd4b61da20 | ||
|
|
c56e836d22 | ||
|
|
427f9c5b00 | ||
|
|
aa07e16f18 | ||
|
|
7b8c7edfd5 | ||
|
|
cf7b35f90d | ||
|
|
02bc9a85c0 | ||
|
|
e1820522db | ||
|
|
0a59c38f31 | ||
|
|
66fdeceb3b | ||
|
|
3f67c41759 | ||
|
|
7fbeafa1cf | ||
|
|
316d8f8e9b | ||
|
|
15d623f2c0 | ||
|
|
d1437b7666 | ||
|
|
ff8e9850ba | ||
|
|
f648a018a2 | ||
|
|
072bd30443 | ||
|
|
48f89f226f | ||
|
|
d5671c2879 | ||
|
|
80114e7a24 | ||
|
|
dede701423 | ||
|
|
43cb4d68f7 | ||
|
|
4783ad3a73 | ||
|
|
482e0c2d0b | ||
|
|
e951164399 | ||
|
|
c73b987cd0 | ||
|
|
b22c9b8542 | ||
|
|
a5327af5e9 | ||
|
|
192f8bec26 | ||
|
|
eb28e22891 | ||
|
|
b6b8f30378 | ||
|
|
67bb4f9552 | ||
|
|
028257480b | ||
|
|
e42a0c45b6 | ||
|
|
1e7f8549ff | ||
|
|
668de71f9d | ||
|
|
067245b810 | ||
|
|
8f236c69e1 | ||
|
|
8ee33da114 | ||
|
|
2fedc2e699 | ||
|
|
1961543e2f | ||
|
|
b4f65734a5 | ||
|
|
0eb80553f6 | ||
|
|
110c63ae95 | ||
|
|
57304536bf | ||
|
|
a9ca5da139 | ||
|
|
947aebfbe0 | ||
|
|
fafc9268d4 | ||
|
|
65a5d55436 | ||
|
|
96b350400f | ||
|
|
7983b4caf2 | ||
|
|
e44122f1be | ||
|
|
42618d7ec6 | ||
|
|
1d0167bbf4 | ||
|
|
43d4b21b23 | ||
|
|
562171c548 | ||
|
|
8dccecdd9f | ||
|
|
940ae364d7 | ||
|
|
532bc454c5 | ||
|
|
14ffe7782c | ||
|
|
2dd40fce44 | ||
|
|
0c8bc46e28 | ||
|
|
7d0dd29937 | ||
|
|
349cd77821 | ||
|
|
dc2d7b1dfe | ||
|
|
be30329288 | ||
|
|
71446d9f3c | ||
|
|
c9d4c8d09b | ||
|
|
c580ffdb70 | ||
|
|
f46deb4eb7 | ||
|
|
b3215bed48 | ||
|
|
2a9ab02753 | ||
|
|
0da586154d | ||
|
|
26d736551d | ||
|
|
9445a3118b | ||
|
|
a2c46c7436 | ||
|
|
8df7a0960e | ||
|
|
e7f35e7a35 | ||
|
|
088e80e38b | ||
|
|
2d65c7f387 | ||
|
|
94db758eb7 | ||
|
|
2bfb615d68 | ||
|
|
87275140f9 | ||
|
|
0672a68ba4 | ||
|
|
246e3ccfad | ||
|
|
b275ee919f | ||
|
|
eda796d5e0 | ||
|
|
d1b3917309 | ||
|
|
ffee2b971f | ||
|
|
b9a80f9c8c | ||
|
|
980f673ce2 | ||
|
|
55767a0003 | ||
|
|
fb44bebe40 | ||
|
|
b79f072499 | ||
|
|
d65a2e8b51 | ||
|
|
e0b42d51db | ||
|
|
c057dc9466 | ||
|
|
fff53a94d3 | ||
|
|
12beedd0a6 | ||
|
|
80f60a7394 | ||
|
|
2359348505 | ||
|
|
63e3e06a8c | ||
|
|
dca92b580c | ||
|
|
24e2960092 | ||
|
|
be1a52c5c1 | ||
|
|
8a34a6c8f4 | ||
|
|
7924ea9bb9 | ||
|
|
a3a17ae792 | ||
|
|
f8801d811b | ||
|
|
425284ac62 | ||
|
|
4872cdf905 | ||
|
|
88cd81c692 | ||
|
|
d45ecd0800 | ||
|
|
4bfcef60f4 | ||
|
|
e9b7e933f5 | ||
|
|
e5301cef49 | ||
|
|
01ca552174 | ||
|
|
4529d73bf3 | ||
|
|
0798906a51 | ||
|
|
8547c24dac | ||
|
|
0e1e1aae2e | ||
|
|
9cc31e4087 | ||
|
|
e9c7555bb9 | ||
|
|
6f897fec59 | ||
|
|
8c3d77f4c7 | ||
|
|
f9b60d83ac | ||
|
|
3992681b84 | ||
|
|
340bd78259 | ||
|
|
12a46275a2 | ||
|
|
b56234f1c9 | ||
|
|
60859ec2b9 | ||
|
|
0a840860f1 | ||
|
|
cebae18dd6 | ||
|
|
9c9c6b6591 | ||
|
|
ca0ffc66d1 | ||
|
|
b0623f04fe | ||
|
|
2dec862ea6 | ||
|
|
f18fe53a9a | ||
|
|
73348de2b4 | ||
|
|
f4a418bfcd | ||
|
|
50588800f5 | ||
|
|
2762215d66 | ||
|
|
4e53797aac | ||
|
|
b24ac4b3a2 | ||
|
|
88ab6afd3e | ||
|
|
5c5db0a961 | ||
|
|
587186d96c | ||
|
|
d9ddf93f83 | ||
|
|
d3882d3513 | ||
|
|
a458215bbb | ||
|
|
7ae0fa3775 | ||
|
|
f1695f8b15 | ||
|
|
4c08bbb9e5 | ||
|
|
9b55ff8c4c | ||
|
|
2096d2b786 | ||
|
|
70196cd6fd | ||
|
|
c2cba97722 | ||
|
|
7534ebd145 | ||
|
|
6b2331340b | ||
|
|
da7b5005fe | ||
|
|
9210e12316 | ||
|
|
2c47a79c38 | ||
|
|
48cebef974 | ||
|
|
52952efd33 | ||
|
|
30dfd28ac4 | ||
|
|
b5a04f05f3 | ||
|
|
d5a34dcbc5 | ||
|
|
fc6a05ced6 | ||
|
|
2d3162a90b | ||
|
|
83c599e741 | ||
|
|
c7f1671d5a | ||
|
|
9f2d945691 | ||
|
|
36ff099145 | ||
|
|
120e010e48 | ||
|
|
2887816cf0 | ||
|
|
9848479306 | ||
|
|
b662e6b9eb | ||
|
|
258f1f0848 | ||
|
|
a07f312089 | ||
|
|
605d8fe59a | ||
|
|
1c7db4da0d | ||
|
|
b0acc95b01 | ||
|
|
5bcad73515 | ||
|
|
db05754d29 | ||
|
|
dfe3457906 | ||
|
|
7dbb1a2aa8 | ||
|
|
83dac4aae2 | ||
|
|
75b79fa002 | ||
|
|
27c1fd0262 | ||
|
|
8069e06f43 | ||
|
|
8cd106fc8a | ||
|
|
a9c9877580 | ||
|
|
19e1201c8a | ||
|
|
912f98e6eb | ||
|
|
b6808e3700 | ||
|
|
a4f78b60e0 | ||
|
|
6c9906c639 | ||
|
|
8a90af6779 | ||
|
|
9831a13284 | ||
|
|
d2386bc1f6 | ||
|
|
5b10af7b1a | ||
|
|
eacf3cc4ed | ||
|
|
87090139f6 | ||
|
|
650c4cf948 | ||
|
|
24c074eeaa | ||
|
|
b54629addb | ||
|
|
cd67d11ecf | ||
|
|
16bb0c93e7 | ||
|
|
7c40c3a61c | ||
|
|
b8e8b7496d | ||
|
|
d29d5e3a47 | ||
|
|
e980973621 | ||
|
|
db261d0fa4 | ||
|
|
b7f6b847d6 | ||
|
|
5311a842a5 | ||
|
|
e288f59da7 | ||
|
|
c62ceb5db1 | ||
|
|
35decf122d | ||
|
|
2adfe1507b | ||
|
|
5516e6b279 | ||
|
|
8b811c610a | ||
|
|
23348f8e65 | ||
|
|
e1c3a2f8cf | ||
|
|
0b0493fa21 | ||
|
|
14d1742869 | ||
|
|
96aa77288b | ||
|
|
a4c9c10029 | ||
|
|
c9d561e7ad | ||
|
|
87ba63c14c | ||
|
|
0decbad7d0 | ||
|
|
0e647dbc0e | ||
|
|
2540d28b34 | ||
|
|
19a5e5bb00 | ||
|
|
f22afc6458 | ||
|
|
ab00415ca1 | ||
|
|
8df5406986 | ||
|
|
7f05159f0f | ||
|
|
61147dfecf | ||
|
|
06da133aac | ||
|
|
ff1d047048 | ||
|
|
4a8b17cb84 | ||
|
|
fbafc09e6a | ||
|
|
c3c960383e | ||
|
|
9e3adf0bf8 | ||
|
|
2bc0aa1777 | ||
|
|
3bc4064b61 | ||
|
|
b4f9258f3c | ||
|
|
ad844cce5c | ||
|
|
c73b064133 | ||
|
|
bd9b63a1aa | ||
|
|
2d87431aeb | ||
|
|
3f3b1fb657 | ||
|
|
477f9eb4ec | ||
|
|
91497dc2ee | ||
|
|
928b78d9f6 | ||
|
|
51825663b9 | ||
|
|
01fdbda728 | ||
|
|
fa3c68fccd | ||
|
|
189977e4c7 | ||
|
|
290fd99b6d | ||
|
|
15cec5bd50 | ||
|
|
f53db636e1 | ||
|
|
47d3802ffe | ||
|
|
e98ffb5ae0 | ||
|
|
5d77eb1314 | ||
|
|
f124cdbb6f | ||
|
|
1649d084d2 | ||
|
|
36ca790c3d | ||
|
|
a91a8216b7 | ||
|
|
8cae7b20e7 | ||
|
|
a537119f3d | ||
|
|
15fe0afe62 | ||
|
|
1b2a4db1ed | ||
|
|
88a02723fa | ||
|
|
7d013f35e2 | ||
|
|
e881d33bea | ||
|
|
38da91becd | ||
|
|
c99d96a700 | ||
|
|
fa89a6950b | ||
|
|
cde08da282 | ||
|
|
8619bd4e84 | ||
|
|
f49449b520 | ||
|
|
2fe79ac6a3 | ||
|
|
d8830c43c5 | ||
|
|
4ac945da70 | ||
|
|
ee0019e25f | ||
|
|
f37b814570 | ||
|
|
e559bc8694 | ||
|
|
f7618440e7 | ||
|
|
7bc62cb674 | ||
|
|
db77e2e9b9 | ||
|
|
083b49f3c4 | ||
|
|
f3f0416d31 | ||
|
|
775a9f86a1 | ||
|
|
e2bfdc444a | ||
|
|
3c7783585e | ||
|
|
761a297903 | ||
|
|
6d30094a93 | ||
|
|
d8e1816774 | ||
|
|
ae371cb362 | ||
|
|
73c46e8e24 | ||
|
|
ef4c40c692 | ||
|
|
04b3ada7f7 | ||
|
|
424b43b3d3 | ||
|
|
9a9255d6f9 | ||
|
|
d9e52e41ff | ||
|
|
a038bc002a | ||
|
|
fa256eb1a7 | ||
|
|
6689f001cf | ||
|
|
cc043bab9c | ||
|
|
5af73b1dcf | ||
|
|
85925a2dc6 | ||
|
|
fb23b6c26f | ||
|
|
d5cec5f71e | ||
|
|
13b62e3d06 | ||
|
|
779f07f072 | ||
|
|
b923d63700 | ||
|
|
7e2dd9bc04 | ||
|
|
ef1f869b73 | ||
|
|
959d6334db | ||
|
|
d7b00b93c7 | ||
|
|
eec084c842 | ||
|
|
87b504a58f | ||
|
|
243d4d0727 | ||
|
|
673acf4308 | ||
|
|
fd180ebff5 | ||
|
|
61705ce7fc | ||
|
|
6e1dd4474b | ||
|
|
7924657584 | ||
|
|
4f5ed8ace0 | ||
|
|
8737220fb6 | ||
|
|
bcb01e8c1b | ||
|
|
41f669bb89 | ||
|
|
983bc199b3 | ||
|
|
8f15269bd0 | ||
|
|
6ffe3e7067 | ||
|
|
51bf6035f7 | ||
|
|
249a6fc9b1 | ||
|
|
c6d4337855 | ||
|
|
9fa3636c57 | ||
|
|
a417e6e644 | ||
|
|
999eb86d7a | ||
|
|
7b97f93051 | ||
|
|
02bc926d75 | ||
|
|
48ee3cdf98 | ||
|
|
2556a912d3 | ||
|
|
487674b1c5 | ||
|
|
347fbf6471 | ||
|
|
3eff70a3bc | ||
|
|
ad7c708039 | ||
|
|
f993c1f22c | ||
|
|
75b714a1ad | ||
|
|
0636d40909 | ||
|
|
cbb3660a17 | ||
|
|
42363beb72 | ||
|
|
efa36a7196 | ||
|
|
fab713a6a8 | ||
|
|
7d5f1143af | ||
|
|
f05f8df44c | ||
|
|
c89a1a8021 | ||
|
|
db7c679e74 | ||
|
|
71cbbf545b | ||
|
|
6809a7ec3e | ||
|
|
70847a74c2 | ||
|
|
6933bc8add | ||
|
|
a6cbaad5a2 | ||
|
|
899972e22f | ||
|
|
820b925b78 | ||
|
|
5457b43a89 | ||
|
|
0adbc9678f | ||
|
|
8d6a2ecf0e | ||
|
|
6acbd80cee | ||
|
|
2331224157 | ||
|
|
5d2aea434c | ||
|
|
798f6983e4 | ||
|
|
a428fdc951 | ||
|
|
ccacc09ff0 | ||
|
|
40cc155aad | ||
|
|
645e0de971 | ||
|
|
325eb5968d | ||
|
|
65a7583731 | ||
|
|
6d8e4e8fb1 | ||
|
|
4716cce208 | ||
|
|
c67bbe6c00 | ||
|
|
1272e21603 | ||
|
|
7302280417 | ||
|
|
871c6d56dc | ||
|
|
eb0389938c | ||
|
|
e6c191bdc6 | ||
|
|
2727eb6dd7 | ||
|
|
821087bcce | ||
|
|
f1955577bc | ||
|
|
966686cd5d | ||
|
|
79d3d50de6 | ||
|
|
da59f0a0db | ||
|
|
bdc00e5dd4 | ||
|
|
9854a4f92e | ||
|
|
33413ecfe9 | ||
|
|
c7e8d297a4 | ||
|
|
3619953f83 | ||
|
|
b20753c3ac | ||
|
|
4fd6b7a608 | ||
|
|
1de8e477b9 | ||
|
|
6c50645213 | ||
|
|
5aeea0c228 | ||
|
|
86ceeb554d | ||
|
|
ca121e0e28 | ||
|
|
d1fef7fd17 | ||
|
|
25d157afdc | ||
|
|
97afe3cd0b | ||
|
|
2923304bdb | ||
|
|
10d741b6df | ||
|
|
12cd115ae4 | ||
|
|
d1def13cd1 | ||
|
|
127c305b1a | ||
|
|
fd06db18a0 | ||
|
|
387b392c18 | ||
|
|
964022f7b5 | ||
|
|
9edc346c2c | ||
|
|
2fe671744b | ||
|
|
fb8daa5607 | ||
|
|
65e0da72b8 | ||
|
|
3d5924e2f5 | ||
|
|
5b2e2d630b | ||
|
|
6a96cb6ba7 | ||
|
|
43ece9c644 | ||
|
|
46cef723b7 | ||
|
|
9b19dac569 | ||
|
|
54eb642726 | ||
|
|
5bb664657c | ||
|
|
dea4d16e87 | ||
|
|
059cd23543 | ||
|
|
502b8630a2 | ||
|
|
b2444b43a6 | ||
|
|
be87ff0193 | ||
|
|
c7fe86021c | ||
|
|
99b0209c89 | ||
|
|
4d61b596ff | ||
|
|
0549e7079d | ||
|
|
b591b64d3f | ||
|
|
a3377686fa | ||
|
|
d23bba5d9f | ||
|
|
5e8fc3e4c8 | ||
|
|
04e76ad6ff | ||
|
|
3a18a9296b | ||
|
|
3e9a6ffbca | ||
|
|
0b06e56182 | ||
|
|
fb5a32f429 | ||
|
|
75dfd4505b | ||
|
|
5661d1428e | ||
|
|
f543c1ee1c | ||
|
|
74254cdbd5 | ||
|
|
a5c8c534c1 | ||
|
|
24159dda58 | ||
|
|
ad0a2b3260 | ||
|
|
7e51c68fde | ||
|
|
3b376a15b7 | ||
|
|
8e5f311708 | ||
|
|
02c48fa8c3 | ||
|
|
51fa2eb103 | ||
|
|
0fd08fe667 | ||
|
|
0da65c6169 | ||
|
|
d2c6db0680 | ||
|
|
5b73938e29 | ||
|
|
2540933921 | ||
|
|
e75e2272f7 | ||
|
|
64b8fc80e8 | ||
|
|
033c149737 | ||
|
|
a6bce6d5f1 | ||
|
|
94016c87fd | ||
|
|
4b6424f631 | ||
|
|
9bf70d8641 | ||
|
|
0eb26fffe1 | ||
|
|
279e7e1456 | ||
|
|
099fc7b31e | ||
|
|
f1e070cc1e | ||
|
|
36f2458f2e | ||
|
|
29db4231ee | ||
|
|
74926578a2 | ||
|
|
5b8ae368bd | ||
|
|
b3e368237e | ||
|
|
3ee5dc131d | ||
|
|
2765d2fe66 | ||
|
|
f50fee407a | ||
|
|
f3295d7c1d | ||
|
|
dd02803f1f | ||
|
|
d4e44d7555 | ||
|
|
f2b8d36d9e | ||
|
|
8d43d4ee21 | ||
|
|
b54af5dbe3 | ||
|
|
4980e901a0 | ||
|
|
cd32311c1f | ||
|
|
b97997b009 | ||
|
|
4aad9fbdd4 | ||
|
|
ac2439e25b | ||
|
|
ddc1556ae0 | ||
|
|
0d75c4d0e3 | ||
|
|
f3a042dcdf | ||
|
|
f9e0a99064 | ||
|
|
d6906fb100 | ||
|
|
821662abcb | ||
|
|
cf496abec0 | ||
|
|
e245d39216 | ||
|
|
44f87c2c17 | ||
|
|
a6fd0de762 | ||
|
|
072ecba4c5 | ||
|
|
3ad5d75bee | ||
|
|
8679c425e0 | ||
|
|
1ec257278e | ||
|
|
ffe89362ab | ||
|
|
a82c0f9e49 | ||
|
|
413271e82a | ||
|
|
60b926b698 | ||
|
|
98be67442e | ||
|
|
8e9c75b6f8 | ||
|
|
8c65604b29 | ||
|
|
930923c211 | ||
|
|
710d540dcd | ||
|
|
ddef8c2499 | ||
|
|
c6289c2bea | ||
|
|
b624bb6fb8 | ||
|
|
4e03f4165d | ||
|
|
2f6ea2f499 | ||
|
|
8c47d1f9c0 | ||
|
|
e31be8d3c9 | ||
|
|
2069ad62d1 | ||
|
|
2993e285c8 | ||
|
|
4ee9cee52a | ||
|
|
0e555699bd | ||
|
|
aad6c63206 | ||
|
|
8c78e09f03 | ||
|
|
25d1eac0f5 | ||
|
|
26daf2cd31 | ||
|
|
280bd44ba3 | ||
|
|
c9d87fef71 | ||
|
|
649742f02a | ||
|
|
245ee84c2d | ||
|
|
76c09205d0 | ||
|
|
d365b5248c | ||
|
|
3dd91bc6f2 | ||
|
|
3dc5a48fcc | ||
|
|
bff077f855 | ||
|
|
6022b09437 | ||
|
|
62b52a78fe | ||
|
|
e33dc9355d | ||
|
|
834da07736 | ||
|
|
fe795cc2d5 | ||
|
|
593de47438 | ||
|
|
ac231e43ad | ||
|
|
aca64071c8 | ||
|
|
5c205cd753 | ||
|
|
57ce2c48b8 | ||
|
|
0937b34983 | ||
|
|
291c4eb258 | ||
|
|
a8744708a0 | ||
|
|
3010b94a64 | ||
|
|
9c4d173b1f | ||
|
|
cca155c5c6 | ||
|
|
a93e7108e6 | ||
|
|
d26df6c178 | ||
|
|
e915964d81 | ||
|
|
1304ca2425 | ||
|
|
894f6b6d1d | ||
|
|
6605a9b22a | ||
|
|
b5d963d151 | ||
|
|
8f840ae0b1 | ||
|
|
7942ca9206 | ||
|
|
a5614bfe40 | ||
|
|
eaac00ad3f | ||
|
|
6687cc9465 | ||
|
|
5b9464ed31 | ||
|
|
4c7d99b26c | ||
|
|
5bf248164a | ||
|
|
4eeae967db | ||
|
|
0ac2a85a3e | ||
|
|
71884e58d4 | ||
|
|
37b2f9617c | ||
|
|
01af14693c | ||
|
|
90e2929fc5 | ||
|
|
8f260aa544 | ||
|
|
ee72a09278 | ||
|
|
e6a422b6c2 | ||
|
|
d29aff919b | ||
|
|
5023bbe8c0 | ||
|
|
c08e3daf29 | ||
|
|
9cf194b52b | ||
|
|
1b0ba71ef0 | ||
|
|
de5ab102b7 | ||
|
|
4e2c7c3329 | ||
|
|
b62840c347 | ||
|
|
674379fe30 | ||
|
|
004ffb9f60 | ||
|
|
bed836b15a | ||
|
|
bbf219ecd2 | ||
|
|
022bf4bdcc | ||
|
|
d8ed90a2c0 | ||
|
|
e5d9d91b01 | ||
|
|
c8f6576cd7 | ||
|
|
14f140fdc5 | ||
|
|
fdbe169423 | ||
|
|
e4ec370e2a | ||
|
|
dfbdb43c31 | ||
|
|
9b8606535d | ||
|
|
0b79fd0a8e | ||
|
|
f032606f32 | ||
|
|
b652a8416d | ||
|
|
8f7b50f3df | ||
|
|
2adafad4e2 | ||
|
|
99a5862bc9 | ||
|
|
6dda54f431 | ||
|
|
8e187a913f | ||
|
|
8935c87d7a | ||
|
|
c6e544750b | ||
|
|
c14d406f59 | ||
|
|
4edfe39449 | ||
|
|
1239d77c88 | ||
|
|
7d4b0b20e8 | ||
|
|
1f42cdd762 | ||
|
|
f182b81e0f | ||
|
|
3f2256d4c4 | ||
|
|
e4b1b12b26 | ||
|
|
35477e0a5a | ||
|
|
f752c9c8a9 | ||
|
|
d5dd7b0a96 | ||
|
|
4e16eba6b0 | ||
|
|
4f129e1c9d | ||
|
|
ffbbb5539d | ||
|
|
30e5b48461 | ||
|
|
0dc31011f5 | ||
|
|
479df00a3b | ||
|
|
06f3e15ce9 | ||
|
|
2f7fde163a | ||
|
|
71d26c2ec5 | ||
|
|
acf8b40a28 | ||
|
|
318027d9e1 | ||
|
|
3116e833a7 | ||
|
|
1cb71b33fa | ||
|
|
7e76b8d0c3 | ||
|
|
e0e78cd879 | ||
|
|
82e4dcf40f | ||
|
|
22b77ad14f | ||
|
|
9b23975f09 | ||
|
|
584c3a65f5 | ||
|
|
db4d6c4419 | ||
|
|
92cb1c60d1 | ||
|
|
ca72d0ca5b | ||
|
|
ba7f245d88 | ||
|
|
ef67ba5cf4 | ||
|
|
1f630a03b9 | ||
|
|
5f71cdc497 | ||
|
|
b85226b8a5 | ||
|
|
af4acccf9f | ||
|
|
3a3bc9898d | ||
|
|
5349c99c74 | ||
|
|
68d175f1c5 | ||
|
|
352f1f31d2 | ||
|
|
c51ef714ad | ||
|
|
5ae68a3c48 | ||
|
|
d2c7e51fac | ||
|
|
d9aa3fb973 | ||
|
|
e1b79ddd69 | ||
|
|
3270a8737c | ||
|
|
cbe8d320fa | ||
|
|
78ce63e71e | ||
|
|
1bb396bea5 | ||
|
|
16304beeab | ||
|
|
d2cfd541ed | ||
|
|
8b1aaf690c | ||
|
|
f1ca8f594c | ||
|
|
78d2e0aa25 | ||
|
|
4a3d239082 | ||
|
|
94eaee8d39 | ||
|
|
471f03ab4f | ||
|
|
9df9af822c | ||
|
|
7b49623b31 | ||
|
|
12bda2b8d0 | ||
|
|
2022f214aa | ||
|
|
786738ba81 | ||
|
|
e4f1b59475 | ||
|
|
7cb302f571 | ||
|
|
8bae91cf6e | ||
|
|
44a145fc86 | ||
|
|
a1fa62adef | ||
|
|
fe247a8f6a | ||
|
|
54e70eff13 | ||
|
|
5453f4a85d | ||
|
|
f2654390e7 | ||
|
|
777903f7da | ||
|
|
2f7892561f | ||
|
|
6d79000d6c | ||
|
|
284911cd6b | ||
|
|
70b6da108c | ||
|
|
6de3b2b262 | ||
|
|
e7554c5413 | ||
|
|
2facd2ab16 | ||
|
|
20ba252c56 | ||
|
|
eba123e381 | ||
|
|
3bb57af331 | ||
|
|
aeb991fdf3 | ||
|
|
75a14380ef | ||
|
|
0aeecf4d9a | ||
|
|
447347c760 | ||
|
|
79619de045 | ||
|
|
1af423e2df | ||
|
|
92fcf9a159 | ||
|
|
d1b1cc5c14 | ||
|
|
b9514ad82a | ||
|
|
7b3411b236 | ||
|
|
915e87e88e | ||
|
|
face505f0d | ||
|
|
dada307e79 | ||
|
|
5124c512c9 | ||
|
|
89f579c1ba | ||
|
|
0035f28035 | ||
|
|
4d556d9235 | ||
|
|
dd5ef29355 | ||
|
|
ca9922ef0a | ||
|
|
b674a0a5e8 | ||
|
|
541d0e6f5c | ||
|
|
fc96e980fb | ||
|
|
1425e3e6c7 | ||
|
|
b9d3c11032 | ||
|
|
81b65e0de3 | ||
|
|
0f80f6af27 | ||
|
|
5eedf98aae | ||
|
|
8f45a2ef3d | ||
|
|
9d51d4a572 | ||
|
|
9978c8ea48 | ||
|
|
e779358429 | ||
|
|
8d663cc3d6 | ||
|
|
a98ca30438 | ||
|
|
589cb2ac79 | ||
|
|
d973be8fea | ||
|
|
41c3b59755 | ||
|
|
8bf168da47 | ||
|
|
0d85c06be2 | ||
|
|
fadede7305 | ||
|
|
a24d31fbba | ||
|
|
c9d463443a | ||
|
|
0e3d486658 | ||
|
|
574ab6bdda | ||
|
|
a172d15463 | ||
|
|
a1362b946a | ||
|
|
6ecb4c5ece | ||
|
|
63c1320f36 | ||
|
|
bf484ce4c4 | ||
|
|
1700212f30 | ||
|
|
751ca3bf75 | ||
|
|
01be8bc64e | ||
|
|
a9e8005b14 | ||
|
|
871cff6232 | ||
|
|
1de8f5ceda | ||
|
|
dd4d2420df | ||
|
|
e9b1a98314 | ||
|
|
f0f48e8e30 | ||
|
|
89b008e1eb | ||
|
|
fb3f8439e2 | ||
|
|
f0e66d2bfb | ||
|
|
65ec254c1b | ||
|
|
47c0d95bd4 | ||
|
|
55d7bed563 | ||
|
|
9a4f3b8d8e | ||
|
|
d9c5ce15f3 | ||
|
|
292908288b | ||
|
|
7fec2661fc | ||
|
|
0cee8bc6ac | ||
|
|
37d867e47e | ||
|
|
c1338fe92f | ||
|
|
1e90a679c0 | ||
|
|
ddb7b9cb8b | ||
|
|
31e1604d99 | ||
|
|
e11c32bca5 | ||
|
|
16309dc077 | ||
|
|
8a1b496cd5 | ||
|
|
99cf99e014 | ||
|
|
f2e9b06dbd | ||
|
|
6cce7c34c2 | ||
|
|
dd2efac3ae | ||
|
|
14c0e50721 | ||
|
|
68e218d002 | ||
|
|
0a026e71b7 | ||
|
|
5175a8c7ca | ||
|
|
661e0cfc71 | ||
|
|
9ff6f35330 | ||
|
|
ab5a066780 | ||
|
|
c2d649e655 | ||
|
|
f82bb65810 | ||
|
|
e76c54b18e | ||
|
|
43658055d3 | ||
|
|
0b71fded7f | ||
|
|
03d1bada2a | ||
|
|
ad76355299 | ||
|
|
c98d409f0a | ||
|
|
9084673fd7 | ||
|
|
8d7300a522 | ||
|
|
7c10600044 | ||
|
|
277da37047 | ||
|
|
f6d697ed2b | ||
|
|
f247927244 | ||
|
|
7097e62582 | ||
|
|
e577bab263 | ||
|
|
274c40793f | ||
|
|
afbe9266e7 | ||
|
|
195ed3e6b6 | ||
|
|
ed8ace7884 | ||
|
|
14761ebec2 | ||
|
|
50d8a19397 | ||
|
|
719324981d | ||
|
|
182c06107f | ||
|
|
d4d4852a0c | ||
|
|
f352fed313 | ||
|
|
5dfce2d199 | ||
|
|
6aafac544b | ||
|
|
2589c29b1c | ||
|
|
076c38526a | ||
|
|
9fd5cfa777 | ||
|
|
c01db8783d | ||
|
|
77101a96a1 | ||
|
|
46863b470c | ||
|
|
736c1f7e1e | ||
|
|
4cf9679334 | ||
|
|
89c9290602 | ||
|
|
3ba75c5a62 | ||
|
|
4ceaed7183 | ||
|
|
f310497d47 | ||
|
|
b3d9e0d1b0 | ||
|
|
7c3d96d0e7 | ||
|
|
cdd730e627 | ||
|
|
21cca34392 | ||
|
|
d64427d726 | ||
|
|
87ccacb99f | ||
|
|
b37773c630 | ||
|
|
4765a90f97 | ||
|
|
29587cd07c | ||
|
|
2651d99676 | ||
|
|
44e5525e6f | ||
|
|
5e48f6898d | ||
|
|
08d48f42ad | ||
|
|
4600dbcda5 | ||
|
|
c1b2ff20de | ||
|
|
94bcaa71e6 | ||
|
|
67bdccbda6 | ||
|
|
f89dabbd0a | ||
|
|
76eee60ad5 | ||
|
|
ac4e4959eb | ||
|
|
7316635aba | ||
|
|
feb277049f | ||
|
|
35d5febd7d | ||
|
|
97a3bfca4e | ||
|
|
cf58133e06 | ||
|
|
0d1811a4ae | ||
|
|
2a8bb715f0 | ||
|
|
cf93869d3f | ||
|
|
77976a5253 | ||
|
|
5af46aaa2e | ||
|
|
679c634459 | ||
|
|
cc33fc2822 | ||
|
|
4f4b10fd86 | ||
|
|
26789115b6 | ||
|
|
87a28dcb9f | ||
|
|
1ad99f39e2 | ||
|
|
dfbeec199e | ||
|
|
41a99ec29d | ||
|
|
7cee3aa1f1 | ||
|
|
b86d8099f1 | ||
|
|
9c4a0043dd | ||
|
|
5cde755976 | ||
|
|
51aab7b656 | ||
|
|
791dc213fa | ||
|
|
8404165db3 | ||
|
|
aee52c01a3 | ||
|
|
58e16b0c48 | ||
|
|
3225ac88c0 | ||
|
|
38b8b85ec2 | ||
|
|
278f748c1c | ||
|
|
874df40303 | ||
|
|
0465d8ce80 | ||
|
|
e9b3f5fd43 | ||
|
|
c49bc2418b | ||
|
|
fd21f5195d | ||
|
|
a1aa63fa06 | ||
|
|
b0a619c714 | ||
|
|
ec648f2c6f | ||
|
|
7ac7c72e03 | ||
|
|
f1d00dbd7f | ||
|
|
f644aba7a8 | ||
|
|
ed75287c8c | ||
|
|
a035c73c41 | ||
|
|
781a619262 | ||
|
|
e08b63cc9f | ||
|
|
97ed57a252 | ||
|
|
71f1779c8c | ||
|
|
116f44cade | ||
|
|
2ebdd689ab | ||
|
|
beecc1a718 | ||
|
|
557ba2adc1 | ||
|
|
89780c1283 | ||
|
|
411e7f86c1 | ||
|
|
610fce67e1 | ||
|
|
d9350cd3ed | ||
|
|
54d6643a1f | ||
|
|
f664420628 | ||
|
|
d4d4c6de68 | ||
|
|
b4313599f8 | ||
|
|
6866f8f0a9 | ||
|
|
3cb478214b | ||
|
|
8d7b4d6446 | ||
|
|
cc6b0bcd72 | ||
|
|
831564cf48 | ||
|
|
4e59b62026 | ||
|
|
f345b9b0ff | ||
|
|
d4fb88a8c4 | ||
|
|
2cb1b6be46 | ||
|
|
feda315c2b | ||
|
|
41219a7d85 | ||
|
|
813a201b6a | ||
|
|
1bc40d48fe | ||
|
|
ddb4e51938 | ||
|
|
9ee67c343d | ||
|
|
bb816eae83 | ||
|
|
92bd446d09 | ||
|
|
a7526fa9c4 | ||
|
|
9b6ff487da | ||
|
|
9d61490743 | ||
|
|
9cce6e41fa | ||
|
|
a56dbdf502 | ||
|
|
7d5f27fa34 | ||
|
|
a61ba0db22 | ||
|
|
7f02a889e2 | ||
|
|
88d388a574 | ||
|
|
4d5a659e1e | ||
|
|
ecfcf1071d | ||
|
|
ec385d45e9 | ||
|
|
985107bb4b | ||
|
|
eb340c74ac | ||
|
|
9a16b33f00 | ||
|
|
0acebc5916 | ||
|
|
8442d9fe5f | ||
|
|
5318dd1a80 | ||
|
|
d916180ec8 | ||
|
|
6e5aa08ee0 | ||
|
|
83e115cde5 | ||
|
|
a3a92cd5dd | ||
|
|
16fc10fb0f | ||
|
|
6bba8b57d4 | ||
|
|
f45c75a137 | ||
|
|
3348df0652 | ||
|
|
3b6a01b63d | ||
|
|
a3554a95c5 | ||
|
|
6fa9af20c0 | ||
|
|
e23437dfa4 | ||
|
|
dcf9eaad77 | ||
|
|
768df05692 | ||
|
|
e0d5d35e32 | ||
|
|
a718a05414 | ||
|
|
953391d9d0 | ||
|
|
4ca229fd42 | ||
|
|
693a43efc8 | ||
|
|
f3d4c931f5 | ||
|
|
ece21315b1 | ||
|
|
318cc57ffe | ||
|
|
ba17924174 | ||
|
|
6bc9daa6ee | ||
|
|
2d8bc95bae | ||
|
|
520eb4a932 | ||
|
|
03733516cc | ||
|
|
e561130336 | ||
|
|
169fa2e7b7 | ||
|
|
6d6db996fb | ||
|
|
f8642bfd94 | ||
|
|
dfdd6bf533 | ||
|
|
91cef71048 | ||
|
|
3daf632384 | ||
|
|
dba5fb9dfa | ||
|
|
70d10a0bb2 | ||
|
|
97291b806a | ||
|
|
c583f008e9 | ||
|
|
b182eba56f | ||
|
|
de5da0e7ce | ||
|
|
10d599f26a | ||
|
|
6b76ed8098 | ||
|
|
a1286d0d4d | ||
|
|
eef3a3afeb | ||
|
|
c980fd0e77 | ||
|
|
3e8f9aa31c | ||
|
|
afebfe5f4f |
86
.github/workflows/check_pypi_version.yml
vendored
Normal file
86
.github/workflows/check_pypi_version.yml
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
name: Check PyPI Version
|
||||
|
||||
# Check to be sure `pip install aider-chat` installs the most recently published version.
|
||||
# If dependencies get yanked, it may render the latest version uninstallable.
|
||||
# See https://github.com/Aider-AI/aider/issues/3699 for example.
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run once a day at midnight UTC
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch: # Allows manual triggering
|
||||
|
||||
jobs:
|
||||
check_version:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install aider-chat
|
||||
run: pip install aider-chat
|
||||
|
||||
- name: Get installed aider version
|
||||
id: installed_version
|
||||
run: |
|
||||
set -x # Enable debugging output
|
||||
aider_version_output=$(aider --version)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: 'aider --version' command failed."
|
||||
exit 1
|
||||
fi
|
||||
echo "Raw aider --version output: $aider_version_output"
|
||||
|
||||
# Extract version number (format X.Y.Z)
|
||||
version_num=$(echo "$aider_version_output" | grep -oP '\d+\.\d+\.\d+')
|
||||
|
||||
# Check if grep found anything
|
||||
if [ -z "$version_num" ]; then
|
||||
echo "Error: Could not extract version number using grep -oP '\d+\.\d+\.\d+' from output: $aider_version_output"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Extracted version number: $version_num"
|
||||
echo "version=$version_num" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for all tags
|
||||
|
||||
- name: Get latest tag
|
||||
id: latest_tag
|
||||
run: |
|
||||
set -x # Enable debugging output
|
||||
# Fetch all tags from remote just in case
|
||||
git fetch --tags origin main
|
||||
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
|
||||
# List all tags, sort by version descending, filter for exact pattern, take the first one
|
||||
latest_tag=$(git tag --sort=-v:refname | grep -P '^v\d+\.\d+\.\d+$' | head -n 1)
|
||||
|
||||
if [ -z "$latest_tag" ]; then
|
||||
echo "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Latest non-dev tag: $latest_tag"
|
||||
# Remove 'v' prefix for comparison
|
||||
tag_num=${latest_tag#v}
|
||||
echo "Extracted tag number: $tag_num"
|
||||
echo "tag=$tag_num" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Compare versions
|
||||
run: |
|
||||
echo "Installed version: ${{ steps.installed_version.outputs.version }}"
|
||||
echo "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
|
||||
if [ "${{ steps.installed_version.outputs.version }}" != "${{ steps.latest_tag.outputs.tag }}" ]; then
|
||||
echo "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
|
||||
exit 1
|
||||
fi
|
||||
echo "Versions match."
|
||||
48
.github/workflows/pre-commit.yml
vendored
Normal file
48
.github/workflows/pre-commit.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
name: pre-commit
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
pre-commit:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
RAW_LOG: pre-commit.log
|
||||
CS_XML: pre-commit.xml
|
||||
steps:
|
||||
- run: sudo apt-get update && sudo apt-get install cppcheck uncrustify
|
||||
if: false
|
||||
- uses: actions/checkout@v4
|
||||
- run: python -m pip install pre-commit
|
||||
- uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Run pre-commit hooks
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
run: |
|
||||
set -o pipefail
|
||||
pre-commit gc
|
||||
pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG}
|
||||
- name: Convert Raw Log to Checkstyle format (launch action)
|
||||
uses: mdeweerd/logToCheckStyle@v2025.1.1
|
||||
if: ${{ failure() }}
|
||||
with:
|
||||
in: ${{ env.RAW_LOG }}
|
||||
# out: ${{ env.CS_XML }}
|
||||
- uses: actions/cache/save@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Provide log as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
name: precommit-logs
|
||||
path: |
|
||||
${{ env.RAW_LOG }}
|
||||
${{ env.CS_XML }}
|
||||
retention-days: 2
|
||||
2
.github/workflows/ubuntu-tests.yml
vendored
2
.github/workflows/ubuntu-tests.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
||||
2
.github/workflows/windows-tests.yml
vendored
2
.github/workflows/windows-tests.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
||||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
||||
90
.github/workflows/windows_check_pypi_version.yml
vendored
Normal file
90
.github/workflows/windows_check_pypi_version.yml
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
name: Windows Check PyPI Version
|
||||
|
||||
# Check to be sure `pip install aider-chat` installs the most recently published version on Windows.
|
||||
# If dependencies get yanked, it may render the latest version uninstallable.
|
||||
# See https://github.com/Aider-AI/aider/issues/3699 for example.
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run once a day at 1 AM UTC (offset from Ubuntu check)
|
||||
- cron: '0 1 * * *'
|
||||
workflow_dispatch: # Allows manual triggering
|
||||
|
||||
jobs:
|
||||
check_version:
|
||||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh # Use PowerShell for all run steps
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install aider-chat
|
||||
run: pip install aider-chat
|
||||
|
||||
- name: Get installed aider version
|
||||
id: installed_version
|
||||
run: |
|
||||
Write-Host "Running 'aider --version'..."
|
||||
$aider_version_output = aider --version
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Error "Error: 'aider --version' command failed."
|
||||
exit 1
|
||||
}
|
||||
Write-Host "Raw aider --version output: $aider_version_output"
|
||||
|
||||
# Extract version number (format X.Y.Z) using PowerShell regex
|
||||
$match = [regex]::Match($aider_version_output, '\d+\.\d+\.\d+')
|
||||
|
||||
if (-not $match.Success) {
|
||||
Write-Error "Error: Could not extract version number using regex '\d+\.\d+\.\d+' from output: $aider_version_output"
|
||||
exit 1
|
||||
}
|
||||
$version_num = $match.Value
|
||||
|
||||
Write-Host "Extracted version number: $version_num"
|
||||
echo "version=$version_num" >> $env:GITHUB_OUTPUT
|
||||
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for all tags
|
||||
|
||||
- name: Get latest tag
|
||||
id: latest_tag
|
||||
run: |
|
||||
Write-Host "Fetching tags..."
|
||||
# Fetch all tags from remote just in case
|
||||
git fetch --tags origin main
|
||||
Write-Host "Getting latest non-dev tag..."
|
||||
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
|
||||
# List all tags, sort by version descending, filter for exact pattern, take the first one
|
||||
$latest_tag = (git tag --sort=-v:refname | Select-String -Pattern '^v\d+\.\d+\.\d+$' | Select-Object -First 1).Line
|
||||
|
||||
if (-not $latest_tag) {
|
||||
Write-Error "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
|
||||
exit 1
|
||||
}
|
||||
|
||||
Write-Host "Latest non-dev tag: $latest_tag"
|
||||
# Remove 'v' prefix for comparison
|
||||
$tag_num = $latest_tag.Substring(1)
|
||||
Write-Host "Extracted tag number: $tag_num"
|
||||
echo "tag=$tag_num" >> $env:GITHUB_OUTPUT
|
||||
|
||||
- name: Compare versions
|
||||
run: |
|
||||
Write-Host "Installed version: ${{ steps.installed_version.outputs.version }}"
|
||||
Write-Host "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
|
||||
if ("${{ steps.installed_version.outputs.version }}" -ne "${{ steps.latest_tag.outputs.tag }}") {
|
||||
Write-Error "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
|
||||
exit 1
|
||||
}
|
||||
Write-Host "Versions match."
|
||||
@@ -18,5 +18,6 @@ repos:
|
||||
rev: v2.2.6
|
||||
hooks:
|
||||
- id: codespell
|
||||
args: ["--skip", "aider/website/docs/languages.md"]
|
||||
additional_dependencies:
|
||||
- tomli
|
||||
|
||||
213
HISTORY.md
213
HISTORY.md
@@ -2,6 +2,211 @@
|
||||
|
||||
### main branch
|
||||
|
||||
- Bumped configargparse to 1.7.1 as 1.7 was pulled.
|
||||
- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
|
||||
- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
|
||||
- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
|
||||
- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
|
||||
- Improved automatic commit messages by providing more context during their generation, by wangboxue.
|
||||
- Aider wrote 89% of the code in this release.
|
||||
|
||||
### Aider v0.83.1
|
||||
|
||||
- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
|
||||
- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
|
||||
- Displayed a spinner with the model name when generating commit messages.
|
||||
|
||||
### Aider v0.83.0
|
||||
|
||||
- Added support for `gemini-2.5-pro-preview-05-06` models.
|
||||
- Added support for `qwen3-235b` models.
|
||||
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
|
||||
- Added a spinner animation while waiting for the LLM to start streaming its response.
|
||||
- Updated the spinner animation to a Knight Rider style.
|
||||
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
|
||||
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
|
||||
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
|
||||
- Commit message prompt specifies the user's language.
|
||||
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
|
||||
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
|
||||
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
|
||||
- Added tracking of total tokens sent and received, now included in benchmark statistics.
|
||||
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
|
||||
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
|
||||
- Improved cost calculation using `litellm.completion_cost` where available.
|
||||
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
|
||||
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
|
||||
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
|
||||
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
|
||||
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
|
||||
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
|
||||
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
|
||||
- Improved display of filenames in the prompt header using rich Text formatting.
|
||||
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
|
||||
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
|
||||
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
|
||||
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
|
||||
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
|
||||
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
|
||||
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
|
||||
- Dropped support for Python 3.9.
|
||||
- Aider wrote 55% of the code in this release.
|
||||
|
||||
### Aider v0.82.3
|
||||
|
||||
- Add support for `gemini-2.5-flash-preview-04-17` models.
|
||||
- Improved robustness of edit block parsing when filenames start with backticks or fences.
|
||||
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
|
||||
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
|
||||
- Instruct models to reply in the user's detected system language.
|
||||
- Fix parsing of diffs for newly created files (`--- /dev/null`).
|
||||
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
|
||||
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
|
||||
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Skip scanning files larger than 1MB for AI comments (`--watch`).
|
||||
|
||||
### Aider v0.82.2
|
||||
|
||||
- Fix editing shell files with diff-fenced, by zjy1412.
|
||||
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
|
||||
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
|
||||
|
||||
### Aider v0.82.1
|
||||
|
||||
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
|
||||
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
|
||||
- Disabled streaming for `o3` models since you need identity verification to stream.
|
||||
- Fixed handling of file paths in unified diffs, especially those generated by git.
|
||||
|
||||
### Aider v0.82.0
|
||||
|
||||
- Support for GPT 4.1, mini and nano.
|
||||
- Added new `patch` edit format for OpenAI's GPT-4.1 model.
|
||||
- Improved support for using architect mode with Gemini 2.5 Pro.
|
||||
- Added new `editor-diff`, `editor-whole`, and `editor-diff-fenced` edit formats.
|
||||
- Bugfix for automatically selecting the best edit format to use in architect mode.
|
||||
- Added support for `grok-3-fast-beta` and `grok-3-mini-fast-beta` models.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.81.3
|
||||
|
||||
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
|
||||
- Updated default settings for Grok models.
|
||||
|
||||
### Aider v0.81.2
|
||||
|
||||
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
|
||||
- Add alias "grok3" for `xai/grok-3-beta`.
|
||||
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
|
||||
- Fix URL extraction from error messages.
|
||||
- Allow adding files by full path even if a file with the same basename is already in the chat.
|
||||
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
|
||||
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
|
||||
- Commit messages generated by aider are now lowercase, by Anton Ödman.
|
||||
|
||||
### Aider v0.81.1
|
||||
|
||||
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
|
||||
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
|
||||
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
|
||||
|
||||
### Aider v0.81.0
|
||||
|
||||
- Added support for the `openrouter/openrouter/quasar-alpha` model.
|
||||
- Run with `aider --model quasar`
|
||||
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
|
||||
- Prevent retrying API calls when the provider reports insufficient credits.
|
||||
- Improve URL detection to exclude trailing double quotes.
|
||||
- Aider wrote 86% of the code in this release.
|
||||
|
||||
### Aider v0.80.4
|
||||
|
||||
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
|
||||
|
||||
### Aider v0.80.3
|
||||
|
||||
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
|
||||
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
|
||||
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
|
||||
|
||||
### Aider v0.80.2
|
||||
|
||||
- Bumped deps.
|
||||
|
||||
### Aider v0.80.1
|
||||
|
||||
- Updated deps for yanked fsspec and aiohttp packages #3699
|
||||
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
|
||||
|
||||
### Aider v0.80.0
|
||||
|
||||
- OpenRouter OAuth integration:
|
||||
- Offer to OAuth against OpenRouter if no model and keys are provided.
|
||||
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
|
||||
- Prioritize `gemini/gemini-2.5-pro-exp-03-25` if `GEMINI_API_KEY` is set, and `vertex_ai/gemini-2.5-pro-exp-03-25` if `VERTEXAI_PROJECT` is set, when no model is specified.
|
||||
- Validate user-configured color settings on startup and warn/disable invalid ones.
|
||||
- Warn at startup if `--stream` and `--cache-prompts` are used together, as cost estimates may be inaccurate.
|
||||
- Boost repomap ranking for files whose path components match identifiers mentioned in the chat.
|
||||
- Change web scraping timeout from an error to a warning, allowing scraping to continue with potentially incomplete content.
|
||||
- Left-align markdown headings in the terminal output, by Peter Schilling.
|
||||
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
|
||||
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
|
||||
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
|
||||
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
|
||||
- Add repomap support for the Scala language, by Vasil Markoukin.
|
||||
- Fixed bug in `/run` that was preventing auto-testing.
|
||||
- Fix bug preventing `UnboundLocalError` during git tree traversal.
|
||||
- Handle `GitCommandNotFound` error if git is not installed or not in PATH.
|
||||
- Handle `FileNotFoundError` if the current working directory is deleted while aider is running.
|
||||
- Fix completion menu current item color styling, by Andrey Ivanov.
|
||||
- Aider wrote 87% of the code in this release.
|
||||
|
||||
### Aider v0.79.2
|
||||
|
||||
- Added 'gemini' alias for gemini-2.5-pro model.
|
||||
- Updated Gemini 2.5 Pro max output tokens to 64k.
|
||||
- Added support for Lisp-style semicolon comments in file watcher, by Matteo Landi.
|
||||
- Added OpenRouter API error detection and retries.
|
||||
- Added openrouter/deepseek-chat-v3-0324 model.
|
||||
- Aider wrote 93% of the code in this release.
|
||||
|
||||
### Aider v0.79.1
|
||||
|
||||
- Improved model listing to include all models in fuzzy matching, including those provided by aider (not litellm).
|
||||
|
||||
### Aider v0.79.0
|
||||
|
||||
- Added support for Gemini 2.5 Pro models.
|
||||
- Added support for DeepSeek V3 0324 model.
|
||||
- Added a new `/context` command that automatically identifies which files need to be edited for a given request.
|
||||
- Added `/edit` as an alias for the `/editor` command.
|
||||
- Added "overeager" mode for Claude 3.7 Sonnet models to try and keep it working within the requested scope.
|
||||
- Aider wrote 65% of the code in this release.
|
||||
|
||||
### Aider v0.78.0
|
||||
|
||||
- Added support for thinking tokens for OpenRouter Sonnet 3.7.
|
||||
- Added commands to switch between model types: `/editor-model` for Editor Model, and `/weak-model` for Weak Model, by csala.
|
||||
- Added model setting validation to ignore `--reasoning-effort` and `--thinking-tokens` if the model doesn't support them.
|
||||
- Added `--check-model-accepts-settings` flag (default: true) to force unsupported model settings.
|
||||
- Annotated which models support reasoning_effort and thinking_tokens settings in the model settings data.
|
||||
- Improved code block rendering in markdown output with better padding using NoInsetMarkdown.
|
||||
- Added `--git-commit-verify` flag (default: False) to control whether git commit hooks are bypassed.
|
||||
- Fixed autocompletion for `/ask`, `/code`, and `/architect` commands, by shladnik.
|
||||
- Added vi-like behavior when pressing enter in multiline-mode while in vi normal/navigation-mode, by Marco Mayer.
|
||||
- Added AWS_PROFILE support for Bedrock models, allowing use of AWS profiles instead of explicit credentials, by lentil32.
|
||||
- Enhanced `--aiderignore` argument to resolve both absolute and relative paths, by mopemope.
|
||||
- Improved platform information handling to gracefully handle retrieval errors.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.77.1
|
||||
|
||||
- Bumped dependencies to pickup litellm fix for Ollama.
|
||||
- Added support for `openrouter/google/gemma-3-27b-it` model.
|
||||
- Updated exclude patterns for help documentation.
|
||||
|
||||
### Aider v0.77.0
|
||||
|
||||
- Big upgrade in [programming languages supported](https://aider.chat/docs/languages.html) by adopting [tree-sitter-language-pack](https://github.com/Goldziher/tree-sitter-language-pack/).
|
||||
- 130 new languages with linter support.
|
||||
- 20 new languages with repo-map support.
|
||||
@@ -215,7 +420,7 @@
|
||||
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
|
||||
- New `--copy-paste` mode.
|
||||
- New `/copy-context` command.
|
||||
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- New `--api-key provider=key` setting.
|
||||
- New `--set-env VAR=value` setting.
|
||||
- Added bash and zsh support to `--watch-files`.
|
||||
@@ -383,7 +588,7 @@
|
||||
|
||||
### Aider v0.59.1
|
||||
|
||||
- Check for obsolete `yes: true` in yaml config, show helpful error.
|
||||
- Check for obsolete `yes: true` in YAML config, show helpful error.
|
||||
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta
|
||||
|
||||
### Aider v0.59.0
|
||||
@@ -393,7 +598,7 @@
|
||||
- Still auto-completes the full paths of the repo files like `/add`.
|
||||
- Now supports globs like `src/**/*.py`
|
||||
- Renamed `--yes` to `--yes-always`.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
|
||||
- Existing YAML and .env files will need to be updated.
|
||||
- Can still abbreviate to `--yes` on the command line.
|
||||
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.
|
||||
@@ -600,7 +805,7 @@
|
||||
- Use `--map-refresh <always|files|manual|auto>` to configure.
|
||||
- Improved cost estimate logic for caching.
|
||||
- Improved editing performance on Jupyter Notebook `.ipynb` files.
|
||||
- Show which config yaml file is loaded with `--verbose`.
|
||||
- Show which config YAML file is loaded with `--verbose`.
|
||||
- Bumped dependency versions.
|
||||
- Bugfix: properly load `.aider.models.metadata.json` data.
|
||||
- Bugfix: Using `--msg /ask ...` caused an exception.
|
||||
|
||||
252
README.md
252
README.md
@@ -1,144 +1,178 @@
|
||||
<p align="center">
|
||||
<a href="https://aider.chat/"><img src="https://aider.chat/assets/logo.svg" alt="Aider Logo" width="300"></a>
|
||||
</p>
|
||||
|
||||
<!-- Edit README.md, not index.md -->
|
||||
<h1 align="center">
|
||||
AI Pair Programming in Your Terminal
|
||||
</h1>
|
||||
|
||||
# Aider is AI pair programming in your terminal
|
||||
|
||||
Aider lets you pair program with LLMs,
|
||||
to edit code in your local git repository.
|
||||
Start a new project or work with an existing code base.
|
||||
Aider works best with Claude 3.7 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
|
||||
<p align="center">
|
||||
Aider lets you pair program with LLMs to start a new project or build on your existing codebase.
|
||||
</p>
|
||||
|
||||
<!-- SCREENCAST START -->
|
||||
<p align="center">
|
||||
<img
|
||||
src="https://aider.chat/assets/screencast.svg"
|
||||
alt="aider screencast"
|
||||
>
|
||||
</p>
|
||||
<!-- SCREENCAST END -->
|
||||
|
||||
<!-- VIDEO START
|
||||
<p align="center">
|
||||
<video style="max-width: 100%; height: auto;" autoplay loop muted playsinline>
|
||||
<source src="/assets/shell-cmds-small.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
</p>
|
||||
VIDEO END -->
|
||||
|
||||
<p align="center">
|
||||
<a href="https://discord.gg/Tv2uQnR88V">
|
||||
<img src="https://img.shields.io/badge/Join-Discord-blue.svg"/>
|
||||
</a>
|
||||
<a href="https://aider.chat/docs/install.html">
|
||||
<img src="https://img.shields.io/badge/Read-Docs-green.svg"/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
## Getting started
|
||||
<!--[[[cog
|
||||
# We can't "include" here.
|
||||
# Because this page is rendered by GitHub as the repo README
|
||||
cog.out(open("aider/website/_includes/get-started.md").read())
|
||||
from scripts.homepage import get_badges_md
|
||||
text = get_badges_md()
|
||||
cog.out(text)
|
||||
]]]-->
|
||||
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
|
||||
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
|
||||
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
|
||||
src="https://img.shields.io/badge/📦%20Installs-2.3M-2ecc71?style=flat-square&labelColor=555555"/></a>
|
||||
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
|
||||
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
|
||||
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
|
||||
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
|
||||
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="Percentage of the new code in Aider's last release written by Aider itself"
|
||||
src="https://img.shields.io/badge/🔄%20Singularity-54%25-e74c3c?style=flat-square&labelColor=555555"/></a>
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
If you already have python 3.8-3.13 installed, you can get started quickly like this:
|
||||
## Features
|
||||
|
||||
### [Cloud and local LLMs](https://aider.chat/docs/llms.html)
|
||||
|
||||
<a href="https://aider.chat/docs/llms.html"><img src="https://aider.chat/assets/icons/brain.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Aider works best with Claude 3.7 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o, but can connect to almost any LLM, including local models.
|
||||
|
||||
<br>
|
||||
|
||||
### [Maps your codebase](https://aider.chat/docs/repomap.html)
|
||||
|
||||
<a href="https://aider.chat/docs/repomap.html"><img src="https://aider.chat/assets/icons/map-outline.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Aider makes a map of your entire codebase, which helps it work well in larger projects.
|
||||
|
||||
<br>
|
||||
|
||||
### [100+ code languages](https://aider.chat/docs/languages.html)
|
||||
|
||||
<a href="https://aider.chat/docs/languages.html"><img src="https://aider.chat/assets/icons/code-tags.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Aider works with most popular programming languages: python, javascript, rust, ruby, go, cpp, php, html, css, and dozens more.
|
||||
|
||||
<br>
|
||||
|
||||
### [Git integration](https://aider.chat/docs/git.html)
|
||||
|
||||
<a href="https://aider.chat/docs/git.html"><img src="https://aider.chat/assets/icons/source-branch.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Aider automatically commits changes with sensible commit messages. Use familiar git tools to easily diff, manage and undo AI changes.
|
||||
|
||||
<br>
|
||||
|
||||
### [Use in your IDE](https://aider.chat/docs/usage/watch.html)
|
||||
|
||||
<a href="https://aider.chat/docs/usage/watch.html"><img src="https://aider.chat/assets/icons/monitor.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Use aider from within your favorite IDE or editor. Ask for changes by adding comments to your code and aider will get to work.
|
||||
|
||||
<br>
|
||||
|
||||
### [Images & web pages](https://aider.chat/docs/usage/images-urls.html)
|
||||
|
||||
<a href="https://aider.chat/docs/usage/images-urls.html"><img src="https://aider.chat/assets/icons/image-multiple.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Add images and web pages to the chat to provide visual context, screenshots, reference docs, etc.
|
||||
|
||||
<br>
|
||||
|
||||
### [Voice-to-code](https://aider.chat/docs/usage/voice.html)
|
||||
|
||||
<a href="https://aider.chat/docs/usage/voice.html"><img src="https://aider.chat/assets/icons/microphone.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Speak with aider about your code! Request new features, test cases or bug fixes using your voice and let aider implement the changes.
|
||||
|
||||
<br>
|
||||
|
||||
### [Linting & testing](https://aider.chat/docs/usage/lint-test.html)
|
||||
|
||||
<a href="https://aider.chat/docs/usage/lint-test.html"><img src="https://aider.chat/assets/icons/check-all.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Automatically lint and test your code every time aider makes changes. Aider can fix problems detected by your linters and test suites.
|
||||
|
||||
<br>
|
||||
|
||||
### [Copy/paste to web chat](https://aider.chat/docs/usage/copypaste.html)
|
||||
|
||||
<a href="https://aider.chat/docs/usage/copypaste.html"><img src="https://aider.chat/assets/icons/content-copy.svg" width="32" height="32" align="left" valign="middle" style="margin-right:10px"></a>
|
||||
Work with any LLM via its web chat interface. Aider streamlines copy/pasting code context and edits back and forth with a browser.
|
||||
|
||||
## Getting Started
|
||||
|
||||
```bash
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
|
||||
# Change directory into your code base
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
# DeepSeek
|
||||
aider --model deepseek --api-key deepseek=<key>
|
||||
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
# Claude 3.7 Sonnet
|
||||
aider --model sonnet --api-key anthropic=<key>
|
||||
|
||||
# Work with GPT-4o via OpenAI's API
|
||||
aider --model gpt-4o --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
# o3-mini
|
||||
aider --model o3-mini --api-key openai=<key>
|
||||
```
|
||||
<!--[[[end]]]-->
|
||||
|
||||
See the
|
||||
[installation instructions](https://aider.chat/docs/install.html)
|
||||
and
|
||||
[usage documentation](https://aider.chat/docs/usage.html)
|
||||
for more details.
|
||||
See the [installation instructions](https://aider.chat/docs/install.html) and [usage documentation](https://aider.chat/docs/usage.html) for more details.
|
||||
|
||||
## Features
|
||||
## More Information
|
||||
|
||||
- Run aider with the files you want to edit: `aider <file1> <file2> ...`
|
||||
- Ask for changes:
|
||||
- Add new features or test cases.
|
||||
- Describe a bug.
|
||||
- Paste in an error message or GitHub issue URL.
|
||||
- Refactor code.
|
||||
- Update docs.
|
||||
- Aider will edit your files to complete your request.
|
||||
- Aider [automatically git commits](https://aider.chat/docs/git.html) changes with a sensible commit message.
|
||||
- [Use aider inside your favorite editor or IDE](https://aider.chat/docs/usage/watch.html).
|
||||
- Aider works with [most popular languages](https://aider.chat/docs/languages.html): python, javascript, typescript, php, html, css, and more...
|
||||
- Aider can edit multiple files at once for complex requests.
|
||||
- Aider uses a [map of your entire git repo](https://aider.chat/docs/repomap.html), which helps it work well in larger codebases.
|
||||
- Edit files in your editor or IDE while chatting with aider,
|
||||
and it will always use the latest version.
|
||||
Pair program with AI.
|
||||
- [Add images to the chat](https://aider.chat/docs/usage/images-urls.html) (GPT-4o, Claude 3.5 Sonnet, etc).
|
||||
- [Add URLs to the chat](https://aider.chat/docs/usage/images-urls.html) and aider will read their content.
|
||||
- [Code with your voice](https://aider.chat/docs/usage/voice.html).
|
||||
- Aider works best with Claude 3.7 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
|
||||
|
||||
## Top tier performance
|
||||
|
||||
[Aider has one of the top scores on SWE Bench](https://aider.chat/2024/06/02/main-swe-bench.html).
|
||||
SWE Bench is a challenging software engineering benchmark where aider
|
||||
solved *real* GitHub issues from popular open source
|
||||
projects like django, scikitlearn, matplotlib, etc.
|
||||
|
||||
## More info
|
||||
|
||||
- [Documentation](https://aider.chat/)
|
||||
- [Installation](https://aider.chat/docs/install.html)
|
||||
- [Usage](https://aider.chat/docs/usage.html)
|
||||
- [Tutorial videos](https://aider.chat/docs/usage/tutorials.html)
|
||||
### Documentation
|
||||
- [Installation Guide](https://aider.chat/docs/install.html)
|
||||
- [Usage Guide](https://aider.chat/docs/usage.html)
|
||||
- [Tutorial Videos](https://aider.chat/docs/usage/tutorials.html)
|
||||
- [Connecting to LLMs](https://aider.chat/docs/llms.html)
|
||||
- [Configuration](https://aider.chat/docs/config.html)
|
||||
- [Configuration Options](https://aider.chat/docs/config.html)
|
||||
- [Troubleshooting](https://aider.chat/docs/troubleshooting.html)
|
||||
- [FAQ](https://aider.chat/docs/faq.html)
|
||||
|
||||
### Community & Resources
|
||||
- [LLM Leaderboards](https://aider.chat/docs/leaderboards/)
|
||||
- [GitHub](https://github.com/Aider-AI/aider)
|
||||
- [Discord](https://discord.gg/Tv2uQnR88V)
|
||||
- [GitHub Repository](https://github.com/Aider-AI/aider)
|
||||
- [Discord Community](https://discord.gg/Y7X7bhMQFV)
|
||||
- [Blog](https://aider.chat/blog/)
|
||||
|
||||
## Kind Words From Users
|
||||
|
||||
## Kind words from users
|
||||
- *"My life has changed... Aider... It's going to rock your world."* — [Eric S. Raymond on X](https://x.com/esrtweet/status/1910809356381413593)
|
||||
- *"The best free open source AI coding assistant."* — [IndyDevDan on YouTube](https://youtu.be/YALpX8oOn78)
|
||||
- *"The best AI coding assistant so far."* — [Matthew Berman on YouTube](https://www.youtube.com/watch?v=df8afeb1FY8)
|
||||
- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS on Hacker News](https://news.ycombinator.com/item?id=36212100)
|
||||
- *"It's a cool workflow... Aider's ergonomics are perfect for me."* — [qup on Hacker News](https://news.ycombinator.com/item?id=38185326)
|
||||
- *"It's really like having your senior developer live right in your Git repo - truly amazing!"* — [rappster on GitHub](https://github.com/Aider-AI/aider/issues/124)
|
||||
- *"What an amazing tool. It's incredible."* — [valyagolev on GitHub](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
|
||||
- *"Aider is such an astounding thing!"* — [cgrothaus on GitHub](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
|
||||
- *"It was WAY faster than I would be getting off the ground and making the first few working versions."* — [Daniel Feldman on X](https://twitter.com/d_feldman/status/1662295077387923456)
|
||||
- *"THANK YOU for Aider! It really feels like a glimpse into the future of coding."* — [derwiki on Hacker News](https://news.ycombinator.com/item?id=38205643)
|
||||
- *"It's just amazing. It is freeing me to do things I felt were out my comfort zone before."* — [Dougie on Discord](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
|
||||
- *"This project is stellar."* — [funkytaco on GitHub](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
|
||||
- *"Amazing project, definitely the best AI coding assistant I've used."* — [joshuavial on GitHub](https://github.com/Aider-AI/aider/issues/84)
|
||||
- *"I absolutely love using Aider ... It makes software development feel so much lighter as an experience."* — [principalideal0 on Discord](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
|
||||
- *"I have been recovering from ... surgeries ... aider ... has allowed me to continue productivity."* — [codeninja on Reddit](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
|
||||
- *"I am an aider addict. I'm getting so much more work done, but in less time."* — [dandandan on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
|
||||
- *"Aider... blows everything else out of the water hands down, there's no competition whatsoever."* — [SystemSculpt on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
|
||||
- *"Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing."* — [Josh Dingus on Discord](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
|
||||
- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan on YouTube](https://www.youtube.com/watch?v=MPYFPvxfGZs)
|
||||
- *"[Aider] changed my daily coding workflows. It's mind-blowing how ...(it)... can change your life."* — [maledorak on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
|
||||
- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos on X](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
|
||||
- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall on X](https://x.com/chris65536/status/1905053299251798432)
|
||||
- *"Aider has been revolutionary for me and my work."* — [Starry Hope on X](https://x.com/starryhopeblog/status/1904985812137132056)
|
||||
- *"Try aider! One of the best ways to vibe code."* — [Chris Wall on X](https://x.com/Chris65536/status/1905053418961391929)
|
||||
- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
|
||||
- *"Aider is also my best friend."* — [jzn21 on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
|
||||
- *"Try Aider, it's worth it."* — [jorgejhms on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
|
||||
- *"I like aider :)"* — [Chenwei Cui on X](https://x.com/ccui42/status/1904965344999145698)
|
||||
- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes ... while keeping the developer in control."* — [Reilly Sweetland on X](https://x.com/rsweetland/status/1904963807237259586)
|
||||
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
|
||||
- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook on X](https://x.com/jodavaho/status/1911154899057795218)
|
||||
- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn on X](https://x.com/anitaheeder/status/1908338609645904160)
|
||||
- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok on Discord](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
|
||||
- *"Aider ... is the tool to benchmark against."* — [BeetleB on Hacker News](https://news.ycombinator.com/item?id=43930201)
|
||||
- *"aider is really cool"* — [kache on X](https://x.com/yacineMTB/status/1911224442430124387)
|
||||
|
||||
- *The best free open source AI coding assistant.* -- [IndyDevDan](https://youtu.be/YALpX8oOn78)
|
||||
- *The best AI coding assistant so far.* -- [Matthew Berman](https://www.youtube.com/watch?v=df8afeb1FY8)
|
||||
- *Aider ... has easily quadrupled my coding productivity.* -- [SOLAR_FIELDS](https://news.ycombinator.com/item?id=36212100)
|
||||
- *It's a cool workflow... Aider's ergonomics are perfect for me.* -- [qup](https://news.ycombinator.com/item?id=38185326)
|
||||
- *It's really like having your senior developer live right in your Git repo - truly amazing!* -- [rappster](https://github.com/Aider-AI/aider/issues/124)
|
||||
- *What an amazing tool. It's incredible.* -- [valyagolev](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
|
||||
- *Aider is such an astounding thing!* -- [cgrothaus](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
|
||||
- *It was WAY faster than I would be getting off the ground and making the first few working versions.* -- [Daniel Feldman](https://twitter.com/d_feldman/status/1662295077387923456)
|
||||
- *THANK YOU for Aider! It really feels like a glimpse into the future of coding.* -- [derwiki](https://news.ycombinator.com/item?id=38205643)
|
||||
- *It's just amazing. It is freeing me to do things I felt were out my comfort zone before.* -- [Dougie](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
|
||||
- *This project is stellar.* -- [funkytaco](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
|
||||
- *Amazing project, definitely the best AI coding assistant I've used.* -- [joshuavial](https://github.com/Aider-AI/aider/issues/84)
|
||||
- *I absolutely love using Aider ... It makes software development feel so much lighter as an experience.* -- [principalideal0](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
|
||||
- *I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity.* -- [codeninja](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
|
||||
- *I am an aider addict. I'm getting so much more work done, but in less time.* -- [dandandan](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
|
||||
- *After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever.* -- [SystemSculpt](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
|
||||
- *Aider is amazing, coupled with Sonnet 3.5 it’s quite mind blowing.* -- [Josh Dingus](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
|
||||
- *Hands down, this is the best AI coding assistant tool so far.* -- [IndyDevDan](https://www.youtube.com/watch?v=MPYFPvxfGZs)
|
||||
- *[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life.* -- [maledorak](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
|
||||
- *Best agent for actual dev work in existing codebases.* -- [Nick Dobos](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from packaging import version
|
||||
|
||||
__version__ = "0.77.1.dev"
|
||||
__version__ = "0.83.3.dev"
|
||||
safe_version = __version__
|
||||
|
||||
try:
|
||||
|
||||
145
aider/args.py
145
aider/args.py
@@ -3,8 +3,10 @@
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import configargparse
|
||||
import shtab
|
||||
|
||||
from aider import __version__
|
||||
from aider.args_formatter import (
|
||||
@@ -17,6 +19,15 @@ from aider.deprecated import add_deprecated_model_args
|
||||
from .dump import dump # noqa: F401
|
||||
|
||||
|
||||
def resolve_aiderignore_path(path_str, git_root=None):
|
||||
path = Path(path_str)
|
||||
if path.is_absolute():
|
||||
return str(path)
|
||||
elif git_root:
|
||||
return str(Path(git_root) / path)
|
||||
return str(path)
|
||||
|
||||
|
||||
def default_env_file(git_root):
|
||||
return os.path.join(git_root, ".env") if git_root else ".env"
|
||||
|
||||
@@ -29,10 +40,22 @@ def get_parser(default_config_files, git_root):
|
||||
config_file_parser_class=configargparse.YAMLConfigFileParser,
|
||||
auto_env_var_prefix="AIDER_",
|
||||
)
|
||||
# List of valid edit formats for argparse validation & shtab completion.
|
||||
# Dynamically gather them from the registered coder classes so the list
|
||||
# stays in sync if new formats are added.
|
||||
from aider import coders as _aider_coders
|
||||
|
||||
edit_format_choices = sorted(
|
||||
{
|
||||
c.edit_format
|
||||
for c in _aider_coders.__all__
|
||||
if hasattr(c, "edit_format") and c.edit_format is not None
|
||||
}
|
||||
)
|
||||
group = parser.add_argument_group("Main model")
|
||||
group.add_argument(
|
||||
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--model",
|
||||
metavar="MODEL",
|
||||
@@ -99,13 +122,13 @@ def get_parser(default_config_files, git_root):
|
||||
metavar="MODEL_SETTINGS_FILE",
|
||||
default=".aider.model.settings.yml",
|
||||
help="Specify a file with aider model settings for unknown models",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--model-metadata-file",
|
||||
metavar="MODEL_METADATA_FILE",
|
||||
default=".aider.model.metadata.json",
|
||||
help="Specify a file with context window and costs for unknown models",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--alias",
|
||||
action="append",
|
||||
@@ -138,6 +161,7 @@ def get_parser(default_config_files, git_root):
|
||||
"--edit-format",
|
||||
"--chat-mode",
|
||||
metavar="EDIT_FORMAT",
|
||||
choices=edit_format_choices,
|
||||
default=None,
|
||||
help="Specify what edit format the LLM should use (default depends on model)",
|
||||
)
|
||||
@@ -172,6 +196,7 @@ def get_parser(default_config_files, git_root):
|
||||
group.add_argument(
|
||||
"--editor-edit-format",
|
||||
metavar="EDITOR_EDIT_FORMAT",
|
||||
choices=edit_format_choices,
|
||||
default=None,
|
||||
help="Specify the edit format for the editor model (default: depends on editor model)",
|
||||
)
|
||||
@@ -181,6 +206,14 @@ def get_parser(default_config_files, git_root):
|
||||
default=True,
|
||||
help="Only work with models that have meta-data available (default: True)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--check-model-accepts-settings",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help=(
|
||||
"Check if model accepts settings like reasoning_effort/thinking_tokens (default: True)"
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--max-chat-history-tokens",
|
||||
type=int,
|
||||
@@ -243,13 +276,13 @@ def get_parser(default_config_files, git_root):
|
||||
metavar="INPUT_HISTORY_FILE",
|
||||
default=default_input_history_file,
|
||||
help=f"Specify the chat input history file (default: {default_input_history_file})",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--chat-history-file",
|
||||
metavar="CHAT_HISTORY_FILE",
|
||||
default=default_chat_history_file,
|
||||
help=f"Specify the chat history file (default: {default_chat_history_file})",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--restore-chat-history",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
@@ -261,7 +294,7 @@ def get_parser(default_config_files, git_root):
|
||||
metavar="LLM_HISTORY_FILE",
|
||||
default=None,
|
||||
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Output settings")
|
||||
@@ -380,12 +413,14 @@ def get_parser(default_config_files, git_root):
|
||||
default_aiderignore_file = (
|
||||
os.path.join(git_root, ".aiderignore") if git_root else ".aiderignore"
|
||||
)
|
||||
|
||||
group.add_argument(
|
||||
"--aiderignore",
|
||||
metavar="AIDERIGNORE",
|
||||
type=lambda path_str: resolve_aiderignore_path(path_str, git_root),
|
||||
default=default_aiderignore_file,
|
||||
help="Specify the aider ignore file (default: .aiderignore in git root)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--subtree-only",
|
||||
action="store_true",
|
||||
@@ -407,14 +442,20 @@ def get_parser(default_config_files, git_root):
|
||||
group.add_argument(
|
||||
"--attribute-author",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider code changes in the git author name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider code changes in the git author name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-committer",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider commits in the git committer name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider commits in the git committer name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence for aider edits."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-commit-message-author",
|
||||
@@ -428,6 +469,22 @@ def get_parser(default_config_files, git_root):
|
||||
default=False,
|
||||
help="Prefix all commit messages with 'aider: ' (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-co-authored-by",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help=(
|
||||
"Attribute aider edits using the Co-authored-by trailer in the commit message"
|
||||
" (default: False). If True, this takes precedence over default --attribute-author and"
|
||||
" --attribute-committer behavior unless they are explicitly set to True."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--git-commit-verify",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help="Enable/disable git pre-commit hooks with --no-verify (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--commit",
|
||||
action="store_true",
|
||||
@@ -509,7 +566,7 @@ def get_parser(default_config_files, git_root):
|
||||
"--analytics-log",
|
||||
metavar="ANALYTICS_LOG_FILE",
|
||||
help="Specify a file to log analytics events",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--analytics-disable",
|
||||
action="store_true",
|
||||
@@ -576,7 +633,7 @@ def get_parser(default_config_files, git_root):
|
||||
"Specify a file containing the message to send the LLM, process reply, then exit"
|
||||
" (disables chat mode)"
|
||||
),
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--gui",
|
||||
"--browser",
|
||||
@@ -594,7 +651,7 @@ def get_parser(default_config_files, git_root):
|
||||
"--apply",
|
||||
metavar="FILE",
|
||||
help="Apply the changes from the given file instead of running the chat (debug)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--apply-clipboard-edits",
|
||||
action="store_true",
|
||||
@@ -644,18 +701,24 @@ def get_parser(default_config_files, git_root):
|
||||
|
||||
######
|
||||
group = parser.add_argument_group("Other settings")
|
||||
group.add_argument(
|
||||
"--disable-playwright",
|
||||
action="store_true",
|
||||
help="Never prompt for or attempt to install Playwright for web scraping (default: False).",
|
||||
default=False,
|
||||
)
|
||||
group.add_argument(
|
||||
"--file",
|
||||
action="append",
|
||||
metavar="FILE",
|
||||
help="specify a file to edit (can be used multiple times)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--read",
|
||||
action="append",
|
||||
metavar="FILE",
|
||||
help="specify a read-only file (can be used multiple times)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--vim",
|
||||
action="store_true",
|
||||
@@ -685,7 +748,7 @@ def get_parser(default_config_files, git_root):
|
||||
"--load",
|
||||
metavar="LOAD_FILE",
|
||||
help="Load and execute /commands from a file on launch",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--encoding",
|
||||
default="utf-8",
|
||||
@@ -706,7 +769,7 @@ def get_parser(default_config_files, git_root):
|
||||
"Specify the config file (default: search for .aider.conf.yml in git root, cwd"
|
||||
" or home directory)"
|
||||
),
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
# This is a duplicate of the argument in the preparser and is a no-op by this time of
|
||||
# argument parsing, but it's here so that the help is displayed as expected.
|
||||
group.add_argument(
|
||||
@@ -714,7 +777,7 @@ def get_parser(default_config_files, git_root):
|
||||
metavar="ENV_FILE",
|
||||
default=default_env_file(git_root),
|
||||
help="Specify the .env file to load (default: .env in git root)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--suggest-shell-commands",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
@@ -762,6 +825,17 @@ def get_parser(default_config_files, git_root):
|
||||
help="Specify which editor to use for the /editor command",
|
||||
)
|
||||
|
||||
supported_shells_list = sorted(list(shtab.SUPPORTED_SHELLS))
|
||||
group.add_argument(
|
||||
"--shell-completions",
|
||||
metavar="SHELL",
|
||||
choices=supported_shells_list,
|
||||
help=(
|
||||
"Print shell completion script for the specified SHELL and exit. Supported shells:"
|
||||
f" {', '.join(supported_shells_list)}. Example: aider --shell-completions bash"
|
||||
),
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Deprecated model settings")
|
||||
# Add deprecated model shortcut arguments
|
||||
@@ -810,13 +884,34 @@ def get_sample_dotenv():
|
||||
|
||||
|
||||
def main():
|
||||
arg = sys.argv[1] if len(sys.argv[1:]) else None
|
||||
|
||||
if arg == "md":
|
||||
print(get_md_help())
|
||||
elif arg == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
if len(sys.argv) > 1:
|
||||
command = sys.argv[1]
|
||||
else:
|
||||
command = "yaml" # Default to yaml if no command is given
|
||||
|
||||
if command == "md":
|
||||
print(get_md_help())
|
||||
elif command == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
elif command == "yaml":
|
||||
print(get_sample_yaml())
|
||||
elif command == "completion":
|
||||
if len(sys.argv) > 2:
|
||||
shell = sys.argv[2]
|
||||
if shell not in shtab.SUPPORTED_SHELLS:
|
||||
print(f"Error: Unsupported shell '{shell}'.", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
parser = get_parser([], None)
|
||||
parser.prog = "aider" # Set the program name on the parser
|
||||
print(shtab.complete(parser, shell=shell))
|
||||
else:
|
||||
print("Error: Please specify a shell for completion.", file=sys.stderr)
|
||||
print(f"Usage: python {sys.argv[0]} completion <shell_name>", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
else:
|
||||
# Default to YAML for any other unrecognized argument, or if 'yaml' was explicitly passed
|
||||
print(get_sample_yaml())
|
||||
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ class YamlHelpFormatter(argparse.HelpFormatter):
|
||||
# Place in your home dir, or at the root of your git repo.
|
||||
##########################################################
|
||||
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the yaml
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the YAML
|
||||
# config file. Keys for all APIs can be stored in a .env file
|
||||
# https://aider.chat/docs/config/dotenv.html
|
||||
|
||||
@@ -143,7 +143,10 @@ class YamlHelpFormatter(argparse.HelpFormatter):
|
||||
default = "true"
|
||||
|
||||
if default:
|
||||
parts.append(f"#{switch}: {default}\n")
|
||||
if "#" in default:
|
||||
parts.append(f'#{switch}: "{default}"\n')
|
||||
else:
|
||||
parts.append(f"#{switch}: {default}\n")
|
||||
elif action.nargs in ("*", "+") or isinstance(action, argparse._AppendAction):
|
||||
parts.append(f"#{switch}: xxx")
|
||||
parts.append("## Specify multiple values like this:")
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
from .architect_coder import ArchitectCoder
|
||||
from .ask_coder import AskCoder
|
||||
from .base_coder import Coder
|
||||
from .context_coder import ContextCoder
|
||||
from .editblock_coder import EditBlockCoder
|
||||
from .editblock_fenced_coder import EditBlockFencedCoder
|
||||
from .editor_diff_fenced_coder import EditorDiffFencedCoder
|
||||
from .editor_editblock_coder import EditorEditBlockCoder
|
||||
from .editor_whole_coder import EditorWholeFileCoder
|
||||
from .help_coder import HelpCoder
|
||||
from .patch_coder import PatchCoder
|
||||
from .udiff_coder import UnifiedDiffCoder
|
||||
from .udiff_simple import UnifiedDiffSimpleCoder
|
||||
from .wholefile_coder import WholeFileCoder
|
||||
|
||||
# from .single_wholefile_func_coder import SingleWholeFileFunctionCoder
|
||||
@@ -18,9 +22,13 @@ __all__ = [
|
||||
EditBlockCoder,
|
||||
EditBlockFencedCoder,
|
||||
WholeFileCoder,
|
||||
PatchCoder,
|
||||
UnifiedDiffCoder,
|
||||
UnifiedDiffSimpleCoder,
|
||||
# SingleWholeFileFunctionCoder,
|
||||
ArchitectCoder,
|
||||
EditorEditBlockCoder,
|
||||
EditorWholeFileCoder,
|
||||
EditorDiffFencedCoder,
|
||||
ContextCoder,
|
||||
]
|
||||
|
||||
@@ -8,7 +8,7 @@ class AskPrompts(CoderPrompts):
|
||||
Answer questions about the supplied code.
|
||||
Always reply to the user in {language}.
|
||||
|
||||
Describe code changes however you like. Don't use SEARCH/REPLACE blocks!
|
||||
If you need to describe code changes, do so *briefly*.
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
@@ -32,4 +32,4 @@ Here are summaries of some files present in my git repo.
|
||||
If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.
|
||||
"""
|
||||
|
||||
system_reminder = ""
|
||||
system_reminder = "{final_reminders}"
|
||||
|
||||
@@ -15,10 +15,19 @@ import time
|
||||
import traceback
|
||||
from collections import defaultdict
|
||||
from datetime import datetime
|
||||
|
||||
# Optional dependency: used to convert locale codes (eg ``en_US``)
|
||||
# into human-readable language names (eg ``English``).
|
||||
try:
|
||||
from babel import Locale # type: ignore
|
||||
except ImportError: # Babel not installed – we will fall back to a small mapping
|
||||
Locale = None
|
||||
from json.decoder import JSONDecodeError
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from aider import __version__, models, prompts, urls, utils
|
||||
from aider.analytics import Analytics
|
||||
from aider.commands import Commands
|
||||
@@ -38,6 +47,7 @@ from aider.repo import ANY_GIT_ERROR, GitRepo
|
||||
from aider.repomap import RepoMap
|
||||
from aider.run_cmd import run_cmd
|
||||
from aider.utils import format_content, format_messages, format_tokens, is_image_file
|
||||
from aider.waiting import WaitingSpinner
|
||||
|
||||
from ..dump import dump # noqa: F401
|
||||
from .chat_chunks import ChatChunks
|
||||
@@ -101,8 +111,6 @@ class Coder:
|
||||
partial_response_content = ""
|
||||
commit_before_message = []
|
||||
message_cost = 0.0
|
||||
message_tokens_sent = 0
|
||||
message_tokens_received = 0
|
||||
add_cache_headers = False
|
||||
cache_warming_thread = None
|
||||
num_cache_warming_pings = 0
|
||||
@@ -168,6 +176,8 @@ class Coder:
|
||||
commands=from_coder.commands.clone(),
|
||||
total_cost=from_coder.total_cost,
|
||||
ignore_mentions=from_coder.ignore_mentions,
|
||||
total_tokens_sent=from_coder.total_tokens_sent,
|
||||
total_tokens_received=from_coder.total_tokens_received,
|
||||
file_watcher=from_coder.file_watcher,
|
||||
)
|
||||
use_kwargs.update(update) # override to complete the switch
|
||||
@@ -209,12 +219,12 @@ class Coder:
|
||||
output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
|
||||
|
||||
# Check for thinking token budget
|
||||
thinking_tokens = main_model.get_thinking_tokens(main_model)
|
||||
thinking_tokens = main_model.get_thinking_tokens()
|
||||
if thinking_tokens:
|
||||
output += f", {thinking_tokens} think tokens"
|
||||
|
||||
# Check for reasoning effort
|
||||
reasoning_effort = main_model.get_reasoning_effort(main_model)
|
||||
reasoning_effort = main_model.get_reasoning_effort()
|
||||
if reasoning_effort:
|
||||
output += f", reasoning {reasoning_effort}"
|
||||
|
||||
@@ -320,6 +330,8 @@ class Coder:
|
||||
chat_language=None,
|
||||
detect_urls=True,
|
||||
ignore_mentions=None,
|
||||
total_tokens_sent=0,
|
||||
total_tokens_received=0,
|
||||
file_watcher=None,
|
||||
auto_copy_context=False,
|
||||
auto_accept_architect=True,
|
||||
@@ -366,6 +378,10 @@ class Coder:
|
||||
self.need_commit_before_edits = set()
|
||||
|
||||
self.total_cost = total_cost
|
||||
self.total_tokens_sent = total_tokens_sent
|
||||
self.total_tokens_received = total_tokens_received
|
||||
self.message_tokens_sent = 0
|
||||
self.message_tokens_received = 0
|
||||
|
||||
self.verbose = verbose
|
||||
self.abs_fnames = set()
|
||||
@@ -429,6 +445,7 @@ class Coder:
|
||||
fname = Path(fname)
|
||||
if self.repo and self.repo.git_ignored_file(fname):
|
||||
self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
|
||||
continue
|
||||
|
||||
if self.repo and self.repo.ignored_file(fname):
|
||||
self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
|
||||
@@ -564,6 +581,15 @@ class Coder:
|
||||
|
||||
return True
|
||||
|
||||
def _stop_waiting_spinner(self):
|
||||
"""Stop and clear the waiting spinner if it is running."""
|
||||
spinner = getattr(self, "waiting_spinner", None)
|
||||
if spinner:
|
||||
try:
|
||||
spinner.stop()
|
||||
finally:
|
||||
self.waiting_spinner = None
|
||||
|
||||
def get_abs_fnames_content(self):
|
||||
for fname in list(self.abs_fnames):
|
||||
content = self.io.read_text(fname)
|
||||
@@ -922,10 +948,11 @@ class Coder:
|
||||
else:
|
||||
self.io.tool_error(text)
|
||||
|
||||
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*)")
|
||||
# Exclude double quotes from the matched URL characters
|
||||
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*)')
|
||||
urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates
|
||||
for url in urls:
|
||||
url = url.rstrip(".',\"")
|
||||
url = url.rstrip(".',\"}") # Added } to the characters to strip
|
||||
self.io.offer_url(url)
|
||||
return urls
|
||||
|
||||
@@ -934,7 +961,8 @@ class Coder:
|
||||
if not self.detect_urls:
|
||||
return inp
|
||||
|
||||
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*[^\s,.])")
|
||||
# Exclude double quotes from the matched URL characters
|
||||
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
|
||||
urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates
|
||||
group = ConfirmGroup(urls)
|
||||
for url in urls:
|
||||
@@ -951,6 +979,9 @@ class Coder:
|
||||
return inp
|
||||
|
||||
def keyboard_interrupt(self):
|
||||
# Ensure cursor is visible on exit
|
||||
Console().show_cursor(True)
|
||||
|
||||
now = time.time()
|
||||
|
||||
thresh = 2 # seconds
|
||||
@@ -1009,28 +1040,93 @@ class Coder:
|
||||
]
|
||||
self.cur_messages = []
|
||||
|
||||
def get_user_language(self):
|
||||
if self.chat_language:
|
||||
return self.chat_language
|
||||
def normalize_language(self, lang_code):
|
||||
"""
|
||||
Convert a locale code such as ``en_US`` or ``fr`` into a readable
|
||||
language name (e.g. ``English`` or ``French``). If Babel is
|
||||
available it is used for reliable conversion; otherwise a small
|
||||
built-in fallback map handles common languages.
|
||||
"""
|
||||
if not lang_code:
|
||||
return None
|
||||
|
||||
if lang_code.upper() in ("C", "POSIX"):
|
||||
return None
|
||||
|
||||
# Probably already a language name
|
||||
if (
|
||||
len(lang_code) > 3
|
||||
and "_" not in lang_code
|
||||
and "-" not in lang_code
|
||||
and lang_code[0].isupper()
|
||||
):
|
||||
return lang_code
|
||||
|
||||
# Preferred: Babel
|
||||
if Locale is not None:
|
||||
try:
|
||||
loc = Locale.parse(lang_code.replace("-", "_"))
|
||||
return loc.get_display_name("en").capitalize()
|
||||
except Exception:
|
||||
pass # Fall back to manual mapping
|
||||
|
||||
# Simple fallback for common languages
|
||||
fallback = {
|
||||
"en": "English",
|
||||
"fr": "French",
|
||||
"es": "Spanish",
|
||||
"de": "German",
|
||||
"it": "Italian",
|
||||
"pt": "Portuguese",
|
||||
"zh": "Chinese",
|
||||
"ja": "Japanese",
|
||||
"ko": "Korean",
|
||||
"ru": "Russian",
|
||||
}
|
||||
primary_lang_code = lang_code.replace("-", "_").split("_")[0].lower()
|
||||
return fallback.get(primary_lang_code, lang_code)
|
||||
|
||||
def get_user_language(self):
|
||||
"""
|
||||
Detect the user's language preference and return a human-readable
|
||||
language name such as ``English``. Detection order:
|
||||
|
||||
1. ``self.chat_language`` if explicitly set
|
||||
2. ``locale.getlocale()``
|
||||
3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
|
||||
"""
|
||||
|
||||
# Explicit override
|
||||
if self.chat_language:
|
||||
return self.normalize_language(self.chat_language)
|
||||
|
||||
# System locale
|
||||
try:
|
||||
lang = locale.getlocale()[0]
|
||||
if lang:
|
||||
return lang # Return the full language code, including country
|
||||
lang = self.normalize_language(lang)
|
||||
if lang:
|
||||
return lang
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
for env_var in ["LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"]:
|
||||
# Environment variables
|
||||
for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
|
||||
lang = os.environ.get(env_var)
|
||||
if lang:
|
||||
return lang.split(".")[
|
||||
0
|
||||
] # Return language and country, but remove encoding if present
|
||||
lang = lang.split(".")[0] # Strip encoding if present
|
||||
return self.normalize_language(lang)
|
||||
|
||||
return None
|
||||
|
||||
def get_platform_info(self):
|
||||
platform_text = f"- Platform: {platform.platform()}\n"
|
||||
platform_text = ""
|
||||
try:
|
||||
platform_text = f"- Platform: {platform.platform()}\n"
|
||||
except KeyError:
|
||||
# Skip platform info if it can't be retrieved
|
||||
platform_text = "- Platform information unavailable\n"
|
||||
|
||||
shell_var = "COMSPEC" if os.name == "nt" else "SHELL"
|
||||
shell_val = os.getenv(shell_var)
|
||||
platform_text += f"- Shell: {shell_var}={shell_val}\n"
|
||||
@@ -1071,22 +1167,33 @@ class Coder:
|
||||
return platform_text
|
||||
|
||||
def fmt_system_prompt(self, prompt):
|
||||
lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""
|
||||
final_reminders = []
|
||||
if self.main_model.lazy:
|
||||
final_reminders.append(self.gpt_prompts.lazy_prompt)
|
||||
if self.main_model.overeager:
|
||||
final_reminders.append(self.gpt_prompts.overeager_prompt)
|
||||
|
||||
user_lang = self.get_user_language()
|
||||
if user_lang:
|
||||
final_reminders.append(f"Reply in {user_lang}.\n")
|
||||
|
||||
platform_text = self.get_platform_info()
|
||||
|
||||
if self.suggest_shell_commands:
|
||||
shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
|
||||
shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
|
||||
rename_with_shell = self.gpt_prompts.rename_with_shell
|
||||
else:
|
||||
shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
|
||||
shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
|
||||
platform=platform_text
|
||||
)
|
||||
rename_with_shell = ""
|
||||
|
||||
if self.chat_language:
|
||||
language = self.chat_language
|
||||
if user_lang: # user_lang is the result of self.get_user_language()
|
||||
language = user_lang
|
||||
else:
|
||||
language = "the same language they are using"
|
||||
language = "the same language they are using" # Default if no specific lang detected
|
||||
|
||||
if self.fence[0] == "`" * 4:
|
||||
quad_backtick_reminder = (
|
||||
@@ -1095,24 +1202,27 @@ class Coder:
|
||||
else:
|
||||
quad_backtick_reminder = ""
|
||||
|
||||
final_reminders = "\n\n".join(final_reminders)
|
||||
|
||||
prompt = prompt.format(
|
||||
fence=self.fence,
|
||||
quad_backtick_reminder=quad_backtick_reminder,
|
||||
lazy_prompt=lazy_prompt,
|
||||
final_reminders=final_reminders,
|
||||
platform=platform_text,
|
||||
shell_cmd_prompt=shell_cmd_prompt,
|
||||
rename_with_shell=rename_with_shell,
|
||||
shell_cmd_reminder=shell_cmd_reminder,
|
||||
go_ahead_tip=self.gpt_prompts.go_ahead_tip,
|
||||
language=language,
|
||||
)
|
||||
|
||||
if self.main_model.system_prompt_prefix:
|
||||
prompt = self.main_model.system_prompt_prefix + prompt
|
||||
|
||||
return prompt
|
||||
|
||||
def format_chat_chunks(self):
|
||||
self.choose_fence()
|
||||
main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
|
||||
if self.main_model.system_prompt_prefix:
|
||||
main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys
|
||||
|
||||
example_messages = []
|
||||
if self.main_model.examples_as_sys_msg:
|
||||
@@ -1321,8 +1431,13 @@ class Coder:
|
||||
utils.show_messages(messages, functions=self.functions)
|
||||
|
||||
self.multi_response_content = ""
|
||||
if self.show_pretty() and self.stream:
|
||||
self.mdstream = self.io.get_assistant_mdstream()
|
||||
if self.show_pretty():
|
||||
self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
|
||||
self.waiting_spinner.start()
|
||||
if self.stream:
|
||||
self.mdstream = self.io.get_assistant_mdstream()
|
||||
else:
|
||||
self.mdstream = None
|
||||
else:
|
||||
self.mdstream = None
|
||||
|
||||
@@ -1395,6 +1510,9 @@ class Coder:
|
||||
self.live_incremental_response(True)
|
||||
self.mdstream = None
|
||||
|
||||
# Ensure any waiting spinner is stopped
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
self.partial_response_content = self.get_multi_response_content_in_progress(True)
|
||||
self.remove_reasoning_content()
|
||||
self.multi_response_content = ""
|
||||
@@ -1444,7 +1562,8 @@ class Coder:
|
||||
return
|
||||
|
||||
try:
|
||||
self.reply_completed()
|
||||
if self.reply_completed():
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
interrupted = True
|
||||
|
||||
@@ -1587,30 +1706,30 @@ class Coder:
|
||||
)
|
||||
]
|
||||
|
||||
def get_file_mentions(self, content):
|
||||
def get_file_mentions(self, content, ignore_current=False):
|
||||
words = set(word for word in content.split())
|
||||
|
||||
# drop sentence punctuation from the end
|
||||
words = set(word.rstrip(",.!;:?") for word in words)
|
||||
|
||||
# strip away all kinds of quotes
|
||||
quotes = "".join(['"', "'", "`"])
|
||||
quotes = "\"'`*_"
|
||||
words = set(word.strip(quotes) for word in words)
|
||||
|
||||
addable_rel_fnames = self.get_addable_relative_files()
|
||||
if ignore_current:
|
||||
addable_rel_fnames = self.get_all_relative_files()
|
||||
existing_basenames = {}
|
||||
else:
|
||||
addable_rel_fnames = self.get_addable_relative_files()
|
||||
|
||||
# Get basenames of files already in chat or read-only
|
||||
existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {
|
||||
os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames
|
||||
}
|
||||
# Get basenames of files already in chat or read-only
|
||||
existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {
|
||||
os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames
|
||||
}
|
||||
|
||||
mentioned_rel_fnames = set()
|
||||
fname_to_rel_fnames = {}
|
||||
for rel_fname in addable_rel_fnames:
|
||||
# Skip files that share a basename with files already in chat
|
||||
if os.path.basename(rel_fname) in existing_basenames:
|
||||
continue
|
||||
|
||||
normalized_rel_fname = rel_fname.replace("\\", "/")
|
||||
normalized_words = set(word.replace("\\", "/") for word in words)
|
||||
if normalized_rel_fname in normalized_words:
|
||||
@@ -1625,6 +1744,10 @@ class Coder:
|
||||
fname_to_rel_fnames[fname].append(rel_fname)
|
||||
|
||||
for fname, rel_fnames in fname_to_rel_fnames.items():
|
||||
# If the basename is already in chat, don't add based on a basename mention
|
||||
if fname in existing_basenames:
|
||||
continue
|
||||
# If the basename mention is unique among addable files and present in the text
|
||||
if len(rel_fnames) == 1 and fname in words:
|
||||
mentioned_rel_fnames.add(rel_fnames[0])
|
||||
|
||||
@@ -1706,6 +1829,9 @@ class Coder:
|
||||
self.io.ai_output(json.dumps(args, indent=4))
|
||||
|
||||
def show_send_output(self, completion):
|
||||
# Stop spinner once we have a response
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
if self.verbose:
|
||||
print(completion)
|
||||
|
||||
@@ -1820,6 +1946,8 @@ class Coder:
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if received_content:
|
||||
self._stop_waiting_spinner()
|
||||
self.partial_response_content += text
|
||||
|
||||
if self.show_pretty():
|
||||
@@ -1899,6 +2027,44 @@ class Coder:
|
||||
self.usage_report = tokens_report
|
||||
return
|
||||
|
||||
try:
|
||||
# Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
|
||||
cost = litellm.completion_cost(completion_response=completion)
|
||||
except Exception:
|
||||
cost = 0
|
||||
|
||||
if not cost:
|
||||
cost = self.compute_costs_from_tokens(
|
||||
prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
)
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
|
||||
def compute_costs_from_tokens(
|
||||
self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
):
|
||||
cost = 0
|
||||
|
||||
input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
|
||||
@@ -1926,40 +2092,15 @@ class Coder:
|
||||
cost += prompt_tokens * input_cost_per_token
|
||||
|
||||
cost += completion_tokens * output_cost_per_token
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if self.add_cache_headers and self.stream:
|
||||
warning = " Use --no-stream for accurate caching costs."
|
||||
self.usage_report = tokens_report + "\n" + cost_report + warning
|
||||
return
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
return cost
|
||||
|
||||
def show_usage_report(self):
|
||||
if not self.usage_report:
|
||||
return
|
||||
|
||||
self.total_tokens_sent += self.message_tokens_sent
|
||||
self.total_tokens_received += self.message_tokens_received
|
||||
|
||||
self.io.tool_output(self.usage_report)
|
||||
|
||||
prompt_tokens = self.message_tokens_sent
|
||||
@@ -2234,7 +2375,7 @@ class Coder:
|
||||
context = self.get_context_from_history(self.cur_messages)
|
||||
|
||||
try:
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True)
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
|
||||
if res:
|
||||
self.show_auto_commit_outcome(res)
|
||||
commit_hash, commit_message = res
|
||||
@@ -2270,7 +2411,7 @@ class Coder:
|
||||
if not self.repo:
|
||||
return
|
||||
|
||||
self.repo.commit(fnames=self.need_commit_before_edits)
|
||||
self.repo.commit(fnames=self.need_commit_before_edits, coder=self)
|
||||
|
||||
# files changed, move cur messages back behind the files messages
|
||||
# self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
|
||||
|
||||
@@ -12,6 +12,11 @@ class CoderPrompts:
|
||||
lazy_prompt = """You are diligent and tireless!
|
||||
You NEVER leave comments describing code without implementing it!
|
||||
You always COMPLETELY IMPLEMENT the needed code!
|
||||
"""
|
||||
|
||||
overeager_prompt = """Pay careful attention to the scope of the user's request.
|
||||
Do what they ask, but no more.
|
||||
Do not improve, comment, fix or modify unrelated parts of the code in any way!
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
@@ -50,3 +55,6 @@ Do not edit these files!
|
||||
shell_cmd_reminder = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
no_shell_cmd_reminder = ""
|
||||
|
||||
rename_with_shell = ""
|
||||
go_ahead_tip = ""
|
||||
|
||||
53
aider/coders/context_coder.py
Normal file
53
aider/coders/context_coder.py
Normal file
@@ -0,0 +1,53 @@
|
||||
from .base_coder import Coder
|
||||
from .context_prompts import ContextPrompts
|
||||
|
||||
|
||||
class ContextCoder(Coder):
|
||||
"""Identify which files need to be edited for a given request."""
|
||||
|
||||
edit_format = "context"
|
||||
gpt_prompts = ContextPrompts()
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
if not self.repo_map:
|
||||
return
|
||||
|
||||
self.repo_map.refresh = "always"
|
||||
self.repo_map.max_map_tokens *= self.repo_map.map_mul_no_files
|
||||
self.repo_map.map_mul_no_files = 1.0
|
||||
|
||||
def reply_completed(self):
|
||||
content = self.partial_response_content
|
||||
if not content or not content.strip():
|
||||
return True
|
||||
|
||||
# dump(repr(content))
|
||||
current_rel_fnames = set(self.get_inchat_relative_files())
|
||||
mentioned_rel_fnames = set(self.get_file_mentions(content, ignore_current=True))
|
||||
|
||||
# dump(current_rel_fnames)
|
||||
# dump(mentioned_rel_fnames)
|
||||
# dump(current_rel_fnames == mentioned_rel_fnames)
|
||||
|
||||
if mentioned_rel_fnames == current_rel_fnames:
|
||||
return True
|
||||
|
||||
if self.num_reflections >= self.max_reflections - 1:
|
||||
return True
|
||||
|
||||
self.abs_fnames = set()
|
||||
for fname in mentioned_rel_fnames:
|
||||
self.add_rel_fname(fname)
|
||||
# dump(self.get_inchat_relative_files())
|
||||
|
||||
self.reflected_message = self.gpt_prompts.try_again
|
||||
|
||||
# mentioned_idents = self.get_ident_mentions(cur_msg_text)
|
||||
# if mentioned_idents:
|
||||
|
||||
return True
|
||||
|
||||
def check_for_file_mentions(self, content):
|
||||
pass
|
||||
75
aider/coders/context_prompts.py
Normal file
75
aider/coders/context_prompts.py
Normal file
@@ -0,0 +1,75 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from .base_prompts import CoderPrompts
|
||||
|
||||
|
||||
class ContextPrompts(CoderPrompts):
|
||||
main_system = """Act as an expert code analyst.
|
||||
Understand the user's question or request, solely to determine ALL the existing sources files which will need to be modified.
|
||||
Return the *complete* list of files which will need to be modified based on the user's request.
|
||||
Explain why each file is needed, including names of key classes/functions/methods/variables.
|
||||
Be sure to include or omit the names of files already added to the chat, based on whether they are actually needed or not.
|
||||
|
||||
The user will use every file you mention, regardless of your commentary.
|
||||
So *ONLY* mention the names of relevant files.
|
||||
If a file is not relevant DO NOT mention it.
|
||||
|
||||
Only return files that will need to be modified, not files that contain useful/relevant functions.
|
||||
|
||||
You are only to discuss EXISTING files and symbols.
|
||||
Only return existing files, don't suggest the names of new files or functions that we will need to create.
|
||||
|
||||
Always reply to the user in {language}.
|
||||
|
||||
Be concise in your replies.
|
||||
Return:
|
||||
1. A bulleted list of files the will need to be edited, and symbols that are highly relevant to the user's request.
|
||||
2. A list of classes/functions/methods/variables that are located OUTSIDE those files which will need to be understood. Just the symbols names, *NOT* file names.
|
||||
|
||||
# Your response *MUST* use this format:
|
||||
|
||||
## ALL files we need to modify, with their relevant symbols:
|
||||
|
||||
- alarms/buzz.py
|
||||
- `Buzzer` class which can make the needed sound
|
||||
- `Buzzer.buzz_buzz()` method triggers the sound
|
||||
- alarms/time.py
|
||||
- `Time.set_alarm(hour, minute)` to set the alarm
|
||||
|
||||
## Relevant symbols from OTHER files:
|
||||
|
||||
- AlarmManager class for setup/teardown of alarms
|
||||
- SoundFactory will be used to create a Buzzer
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
|
||||
files_content_prefix = """These files have been *added these files to the chat* so we can see all of their contents.
|
||||
*Trust this message as the true contents of the files!*
|
||||
Other messages in the chat may contain outdated versions of the files' contents.
|
||||
""" # noqa: E501
|
||||
|
||||
files_content_assistant_reply = (
|
||||
"Ok, I will use that as the true, current contents of the files."
|
||||
)
|
||||
|
||||
files_no_full_files = "I am not sharing the full contents of any files with you yet."
|
||||
|
||||
files_no_full_files_with_repo_map = ""
|
||||
files_no_full_files_with_repo_map_reply = ""
|
||||
|
||||
repo_content_prefix = """I am working with you on code in a git repository.
|
||||
Here are summaries of some files present in my git repo.
|
||||
If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.
|
||||
"""
|
||||
|
||||
system_reminder = """
|
||||
NEVER RETURN CODE!
|
||||
"""
|
||||
|
||||
try_again = """I have updated the set of files added to the chat.
|
||||
Review them to decide if this is the correct set of files or if we need to add more or remove files.
|
||||
|
||||
If this is the right set, just return the current list of files.
|
||||
Or return a smaller or larger set of files which need to be edited, with symbols that are highly relevant to the user's request.
|
||||
"""
|
||||
@@ -412,7 +412,16 @@ def strip_filename(filename, fence):
|
||||
return
|
||||
|
||||
start_fence = fence[0]
|
||||
if filename.startswith(start_fence) or filename.startswith(triple_backticks):
|
||||
if filename.startswith(start_fence):
|
||||
candidate = filename[len(start_fence) :]
|
||||
if candidate and ("." in candidate or "/" in candidate):
|
||||
return candidate
|
||||
return
|
||||
|
||||
if filename.startswith(triple_backticks):
|
||||
candidate = filename[len(triple_backticks) :]
|
||||
if candidate and ("." in candidate or "/" in candidate):
|
||||
return candidate
|
||||
return
|
||||
|
||||
filename = filename.rstrip(":")
|
||||
@@ -454,7 +463,14 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
|
||||
"```csh",
|
||||
"```tcsh",
|
||||
]
|
||||
next_is_editblock = i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip())
|
||||
|
||||
# Check if the next line or the one after that is an editblock
|
||||
next_is_editblock = (
|
||||
i + 1 < len(lines)
|
||||
and head_pattern.match(lines[i + 1].strip())
|
||||
or i + 2 < len(lines)
|
||||
and head_pattern.match(lines[i + 2].strip())
|
||||
)
|
||||
|
||||
if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock:
|
||||
shell_content = []
|
||||
|
||||
@@ -5,5 +5,6 @@ from .editblock_fenced_prompts import EditBlockFencedPrompts
|
||||
|
||||
class EditBlockFencedCoder(EditBlockCoder):
|
||||
"""A coder that uses fenced search/replace blocks for code modifications."""
|
||||
|
||||
edit_format = "diff-fenced"
|
||||
gpt_prompts = EditBlockFencedPrompts()
|
||||
|
||||
@@ -19,7 +19,7 @@ class EditBlockFencedPrompts(EditBlockPrompts):
|
||||
|
||||
Here are the *SEARCH/REPLACE* blocks:
|
||||
|
||||
{fence[0]}
|
||||
{fence[0]}python
|
||||
mathweb/flask/app.py
|
||||
<<<<<<< SEARCH
|
||||
from flask import Flask
|
||||
@@ -29,7 +29,7 @@ from flask import Flask
|
||||
>>>>>>> REPLACE
|
||||
{fence[1]}
|
||||
|
||||
{fence[0]}
|
||||
{fence[0]}python
|
||||
mathweb/flask/app.py
|
||||
<<<<<<< SEARCH
|
||||
def factorial(n):
|
||||
@@ -44,7 +44,7 @@ def factorial(n):
|
||||
>>>>>>> REPLACE
|
||||
{fence[1]}
|
||||
|
||||
{fence[0]}
|
||||
{fence[0]}python
|
||||
mathweb/flask/app.py
|
||||
<<<<<<< SEARCH
|
||||
return str(factorial(n))
|
||||
@@ -68,7 +68,7 @@ mathweb/flask/app.py
|
||||
|
||||
Here are the *SEARCH/REPLACE* blocks:
|
||||
|
||||
{fence[0]}
|
||||
{fence[0]}python
|
||||
hello.py
|
||||
<<<<<<< SEARCH
|
||||
=======
|
||||
@@ -79,7 +79,7 @@ def hello():
|
||||
>>>>>>> REPLACE
|
||||
{fence[1]}
|
||||
|
||||
{fence[0]}
|
||||
{fence[0]}python
|
||||
main.py
|
||||
<<<<<<< SEARCH
|
||||
def hello():
|
||||
@@ -93,3 +93,51 @@ from hello import hello
|
||||
""",
|
||||
),
|
||||
]
|
||||
|
||||
system_reminder = """
|
||||
# *SEARCH/REPLACE block* Rules:
|
||||
|
||||
Every *SEARCH/REPLACE block* must use this format:
|
||||
1. The opening fence and code language, eg: {fence[0]}python
|
||||
2. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.
|
||||
3. The start of search block: <<<<<<< SEARCH
|
||||
4. A contiguous chunk of lines to search for in the existing source code
|
||||
5. The dividing line: =======
|
||||
6. The lines to replace into the source code
|
||||
7. The end of the replace block: >>>>>>> REPLACE
|
||||
8. The closing fence: {fence[1]}
|
||||
|
||||
Use the *FULL* file path, as shown to you by the user.
|
||||
{quad_backtick_reminder}
|
||||
Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
|
||||
If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
|
||||
|
||||
*SEARCH/REPLACE* blocks will *only* replace the first match occurrence.
|
||||
Including multiple unique *SEARCH/REPLACE* blocks if needed.
|
||||
Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.
|
||||
|
||||
Keep *SEARCH/REPLACE* blocks concise.
|
||||
Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.
|
||||
Include just the changing lines, and a few surrounding lines if needed for uniqueness.
|
||||
Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.
|
||||
|
||||
Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!
|
||||
|
||||
To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.
|
||||
|
||||
Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.
|
||||
|
||||
If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
|
||||
- A new file path, including dir name if needed
|
||||
- An empty `SEARCH` section
|
||||
- The new file's contents in the `REPLACE` section
|
||||
|
||||
To rename files which have been added to the chat, use shell commands at the end of your response.
|
||||
|
||||
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
{final_reminders}
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_reminder}
|
||||
"""
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from . import shell
|
||||
from .base_prompts import CoderPrompts
|
||||
|
||||
|
||||
@@ -7,7 +8,7 @@ class EditBlockPrompts(CoderPrompts):
|
||||
main_system = """Act as an expert software developer.
|
||||
Always use best practices when coding.
|
||||
Respect and use existing conventions, libraries, etc that are already present in the code base.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Take requests for changes to the supplied code.
|
||||
If the request is ambiguous, ask questions.
|
||||
|
||||
@@ -28,32 +29,6 @@ You can keep asking if you then decide you need to edit more files.
|
||||
All changes to files must use this *SEARCH/REPLACE block* format.
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_prompt}
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = """
|
||||
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
|
||||
|
||||
Just suggest shell commands this way, not example code.
|
||||
Only suggest complete shell commands that are ready to execute, without placeholders.
|
||||
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
|
||||
Do not suggest multi-line shell commands.
|
||||
All shell commands will run from the root directory of the user's project.
|
||||
|
||||
Use the appropriate shell based on the user's system info:
|
||||
{platform}
|
||||
Examples of when to suggest shell commands:
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
"""
|
||||
|
||||
no_shell_cmd_prompt = """
|
||||
Keep in mind these details about the user's platform and environment:
|
||||
{platform}
|
||||
"""
|
||||
example_messages = [
|
||||
dict(
|
||||
@@ -181,23 +156,19 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
|
||||
- An empty `SEARCH` section
|
||||
- The new file's contents in the `REPLACE` section
|
||||
|
||||
To rename files which have been added to the chat, use shell commands at the end of your response.
|
||||
|
||||
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
{lazy_prompt}
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
{shell_cmd_reminder}
|
||||
"""
|
||||
|
||||
shell_cmd_reminder = """
|
||||
Examples of when to suggest shell commands:
|
||||
rename_with_shell = """To rename files which have been added to the chat, use shell commands at the end of your response.
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
"""
|
||||
|
||||
go_ahead_tip = """If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
|
||||
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
|
||||
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = shell.shell_cmd_prompt
|
||||
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
|
||||
shell_cmd_reminder = shell.shell_cmd_reminder
|
||||
|
||||
9
aider/coders/editor_diff_fenced_coder.py
Normal file
9
aider/coders/editor_diff_fenced_coder.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from .editblock_fenced_coder import EditBlockFencedCoder
|
||||
from .editor_diff_fenced_prompts import EditorDiffFencedPrompts
|
||||
|
||||
|
||||
class EditorDiffFencedCoder(EditBlockFencedCoder):
|
||||
"A coder that uses search/replace blocks, focused purely on editing files."
|
||||
|
||||
edit_format = "editor-diff-fenced"
|
||||
gpt_prompts = EditorDiffFencedPrompts()
|
||||
11
aider/coders/editor_diff_fenced_prompts.py
Normal file
11
aider/coders/editor_diff_fenced_prompts.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from .editblock_fenced_prompts import EditBlockFencedPrompts
|
||||
|
||||
|
||||
class EditorDiffFencedPrompts(EditBlockFencedPrompts):
|
||||
shell_cmd_prompt = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
shell_cmd_reminder = ""
|
||||
go_ahead_tip = ""
|
||||
rename_with_shell = ""
|
||||
@@ -5,7 +5,7 @@ from .editblock_prompts import EditBlockPrompts
|
||||
|
||||
class EditorEditBlockPrompts(EditBlockPrompts):
|
||||
main_system = """Act as an expert software developer who edits source code.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Describe each change with a *SEARCH/REPLACE block* per the examples below.
|
||||
All changes to files must use this *SEARCH/REPLACE block* format.
|
||||
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
@@ -14,3 +14,5 @@ ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
|
||||
shell_cmd_prompt = ""
|
||||
no_shell_cmd_prompt = ""
|
||||
shell_cmd_reminder = ""
|
||||
go_ahead_tip = ""
|
||||
rename_with_shell = ""
|
||||
|
||||
@@ -5,6 +5,6 @@ from .wholefile_prompts import WholeFilePrompts
|
||||
|
||||
class EditorWholeFilePrompts(WholeFilePrompts):
|
||||
main_system = """Act as an expert software developer and make changes to source code.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Output a copy of each file that needs changes.
|
||||
"""
|
||||
|
||||
@@ -5,6 +5,7 @@ from .help_prompts import HelpPrompts
|
||||
|
||||
class HelpCoder(Coder):
|
||||
"""Interactive help and documentation about aider."""
|
||||
|
||||
edit_format = "help"
|
||||
gpt_prompts = HelpPrompts()
|
||||
|
||||
|
||||
706
aider/coders/patch_coder.py
Normal file
706
aider/coders/patch_coder.py
Normal file
@@ -0,0 +1,706 @@
|
||||
import pathlib
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
from .base_coder import Coder
|
||||
from .patch_prompts import PatchPrompts
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Domain objects & Exceptions (Adapted from apply_patch.py)
|
||||
# --------------------------------------------------------------------------- #
|
||||
class DiffError(ValueError):
|
||||
"""Any problem detected while parsing or applying a patch."""
|
||||
|
||||
|
||||
class ActionType(str, Enum):
|
||||
ADD = "Add"
|
||||
DELETE = "Delete"
|
||||
UPDATE = "Update"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Chunk:
|
||||
orig_index: int = -1 # Line number in the *original* file block where the change starts
|
||||
del_lines: List[str] = field(default_factory=list)
|
||||
ins_lines: List[str] = field(default_factory=list)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PatchAction:
|
||||
type: ActionType
|
||||
path: str
|
||||
# For ADD:
|
||||
new_content: Optional[str] = None
|
||||
# For UPDATE:
|
||||
chunks: List[Chunk] = field(default_factory=list)
|
||||
move_path: Optional[str] = None
|
||||
|
||||
|
||||
# Type alias for the return type of get_edits
|
||||
EditResult = Tuple[str, PatchAction]
|
||||
|
||||
|
||||
@dataclass
|
||||
class Patch:
|
||||
actions: Dict[str, PatchAction] = field(default_factory=dict)
|
||||
fuzz: int = 0 # Track fuzziness used during parsing
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Helper functions (Adapted from apply_patch.py)
|
||||
# --------------------------------------------------------------------------- #
|
||||
def _norm(line: str) -> str:
|
||||
"""Strip CR so comparisons work for both LF and CRLF input."""
|
||||
return line.rstrip("\r")
|
||||
|
||||
|
||||
def find_context_core(lines: List[str], context: List[str], start: int) -> Tuple[int, int]:
|
||||
"""Finds context block, returns start index and fuzz level."""
|
||||
if not context:
|
||||
return start, 0
|
||||
|
||||
# Exact match
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if lines[i : i + len(context)] == context:
|
||||
return i, 0
|
||||
# Rstrip match
|
||||
norm_context = [s.rstrip() for s in context]
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if [s.rstrip() for s in lines[i : i + len(context)]] == norm_context:
|
||||
return i, 1 # Fuzz level 1
|
||||
# Strip match
|
||||
norm_context_strip = [s.strip() for s in context]
|
||||
for i in range(start, len(lines) - len(context) + 1):
|
||||
if [s.strip() for s in lines[i : i + len(context)]] == norm_context_strip:
|
||||
return i, 100 # Fuzz level 100
|
||||
return -1, 0
|
||||
|
||||
|
||||
def find_context(lines: List[str], context: List[str], start: int, eof: bool) -> Tuple[int, int]:
|
||||
"""Finds context, handling EOF marker."""
|
||||
if eof:
|
||||
# If EOF marker, first try matching at the very end
|
||||
if len(lines) >= len(context):
|
||||
new_index, fuzz = find_context_core(lines, context, len(lines) - len(context))
|
||||
if new_index != -1:
|
||||
return new_index, fuzz
|
||||
# If not found at end, search from `start` as fallback
|
||||
new_index, fuzz = find_context_core(lines, context, start)
|
||||
return new_index, fuzz + 10_000 # Add large fuzz penalty if EOF wasn't at end
|
||||
# Normal case: search from `start`
|
||||
return find_context_core(lines, context, start)
|
||||
|
||||
|
||||
def peek_next_section(lines: List[str], index: int) -> Tuple[List[str], List[Chunk], int, bool]:
|
||||
"""
|
||||
Parses one section (context, -, + lines) of an Update block.
|
||||
Returns: (context_lines, chunks_in_section, next_index, is_eof)
|
||||
"""
|
||||
context_lines: List[str] = []
|
||||
del_lines: List[str] = []
|
||||
ins_lines: List[str] = []
|
||||
chunks: List[Chunk] = []
|
||||
mode = "keep" # Start by expecting context lines
|
||||
start_index = index
|
||||
|
||||
while index < len(lines):
|
||||
line = lines[index]
|
||||
norm_line = _norm(line)
|
||||
|
||||
# Check for section terminators
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"@@",
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
"*** End of File", # Special terminator
|
||||
)
|
||||
):
|
||||
break
|
||||
if norm_line == "***": # Legacy/alternative terminator? Handle just in case.
|
||||
break
|
||||
if norm_line.startswith("***"): # Invalid line
|
||||
raise DiffError(f"Invalid patch line found in update section: {line}")
|
||||
|
||||
index += 1
|
||||
last_mode = mode
|
||||
|
||||
# Determine line type and strip prefix
|
||||
if line.startswith("+"):
|
||||
mode = "add"
|
||||
line_content = line[1:]
|
||||
elif line.startswith("-"):
|
||||
mode = "delete"
|
||||
line_content = line[1:]
|
||||
elif line.startswith(" "):
|
||||
mode = "keep"
|
||||
line_content = line[1:]
|
||||
elif line.strip() == "": # Treat blank lines in patch as context ' '
|
||||
mode = "keep"
|
||||
line_content = "" # Keep it as a blank line
|
||||
else:
|
||||
# Assume lines without prefix are context if format is loose,
|
||||
# but strict format requires ' '. Raise error for strictness.
|
||||
raise DiffError(f"Invalid line prefix in update section: {line}")
|
||||
|
||||
# If mode changes from add/delete back to keep, finalize the previous chunk
|
||||
if mode == "keep" and last_mode != "keep":
|
||||
if del_lines or ins_lines:
|
||||
chunks.append(
|
||||
Chunk(
|
||||
# orig_index is relative to the start of the *context* block found
|
||||
orig_index=len(context_lines) - len(del_lines),
|
||||
del_lines=del_lines,
|
||||
ins_lines=ins_lines,
|
||||
)
|
||||
)
|
||||
del_lines, ins_lines = [], []
|
||||
|
||||
# Collect lines based on mode
|
||||
if mode == "delete":
|
||||
del_lines.append(line_content)
|
||||
context_lines.append(line_content) # Deleted lines are part of the original context
|
||||
elif mode == "add":
|
||||
ins_lines.append(line_content)
|
||||
elif mode == "keep":
|
||||
context_lines.append(line_content)
|
||||
|
||||
# Finalize any pending chunk at the end of the section
|
||||
if del_lines or ins_lines:
|
||||
chunks.append(
|
||||
Chunk(
|
||||
orig_index=len(context_lines) - len(del_lines),
|
||||
del_lines=del_lines,
|
||||
ins_lines=ins_lines,
|
||||
)
|
||||
)
|
||||
|
||||
# Check for EOF marker
|
||||
is_eof = False
|
||||
if index < len(lines) and _norm(lines[index]) == "*** End of File":
|
||||
index += 1
|
||||
is_eof = True
|
||||
|
||||
if index == start_index and not is_eof: # Should not happen if patch is well-formed
|
||||
raise DiffError("Empty patch section found.")
|
||||
|
||||
return context_lines, chunks, index, is_eof
|
||||
|
||||
|
||||
def identify_files_needed(text: str) -> List[str]:
|
||||
"""Extracts file paths from Update and Delete actions."""
|
||||
lines = text.splitlines()
|
||||
paths = set()
|
||||
for line in lines:
|
||||
norm_line = _norm(line)
|
||||
if norm_line.startswith("*** Update File: "):
|
||||
paths.add(norm_line[len("*** Update File: ") :].strip())
|
||||
elif norm_line.startswith("*** Delete File: "):
|
||||
paths.add(norm_line[len("*** Delete File: ") :].strip())
|
||||
return list(paths)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# PatchCoder Class Implementation
|
||||
# --------------------------------------------------------------------------- #
|
||||
class PatchCoder(Coder):
|
||||
"""
|
||||
A coder that uses a custom patch format for code modifications,
|
||||
inspired by the format described in tmp.gpt41edits.txt.
|
||||
Applies patches using logic adapted from the reference apply_patch.py script.
|
||||
"""
|
||||
|
||||
edit_format = "patch"
|
||||
gpt_prompts = PatchPrompts()
|
||||
|
||||
def get_edits(self) -> List[EditResult]:
|
||||
"""
|
||||
Parses the LLM response content (containing the patch) into a list of
|
||||
tuples, where each tuple contains the file path and the PatchAction object.
|
||||
"""
|
||||
content = self.partial_response_content
|
||||
if not content or not content.strip():
|
||||
return []
|
||||
|
||||
# Check for patch sentinels
|
||||
lines = content.splitlines()
|
||||
if (
|
||||
len(lines) < 2
|
||||
or not _norm(lines[0]).startswith("*** Begin Patch")
|
||||
# Allow flexible end, might be EOF or just end of stream
|
||||
# or _norm(lines[-1]) != "*** End Patch"
|
||||
):
|
||||
# Tolerate missing sentinels if content looks like a patch action
|
||||
is_patch_like = any(
|
||||
_norm(line).startswith(
|
||||
("@@", "*** Update File:", "*** Add File:", "*** Delete File:")
|
||||
)
|
||||
for line in lines
|
||||
)
|
||||
if not is_patch_like:
|
||||
# If it doesn't even look like a patch, return empty
|
||||
self.io.tool_warning("Response does not appear to be in patch format.")
|
||||
return []
|
||||
# If it looks like a patch but lacks sentinels, try parsing anyway but warn.
|
||||
self.io.tool_warning(
|
||||
"Patch format warning: Missing '*** Begin Patch'/'*** End Patch' sentinels."
|
||||
)
|
||||
start_index = 0
|
||||
else:
|
||||
start_index = 1 # Skip "*** Begin Patch"
|
||||
|
||||
# Identify files needed for context lookups during parsing
|
||||
needed_paths = identify_files_needed(content)
|
||||
current_files: Dict[str, str] = {}
|
||||
for rel_path in needed_paths:
|
||||
abs_path = self.abs_root_path(rel_path)
|
||||
try:
|
||||
# Use io.read_text to handle potential errors/encodings
|
||||
file_content = self.io.read_text(abs_path)
|
||||
if file_content is None:
|
||||
raise DiffError(
|
||||
f"File referenced in patch not found or could not be read: {rel_path}"
|
||||
)
|
||||
current_files[rel_path] = file_content
|
||||
except FileNotFoundError:
|
||||
raise DiffError(f"File referenced in patch not found: {rel_path}")
|
||||
except IOError as e:
|
||||
raise DiffError(f"Error reading file {rel_path}: {e}")
|
||||
|
||||
try:
|
||||
# Parse the patch text using adapted logic
|
||||
patch_obj = self._parse_patch_text(lines, start_index, current_files)
|
||||
# Convert Patch object actions dict to a list of tuples (path, action)
|
||||
# for compatibility with the base Coder's prepare_to_edit method.
|
||||
results = []
|
||||
for path, action in patch_obj.actions.items():
|
||||
results.append((path, action))
|
||||
return results
|
||||
except DiffError as e:
|
||||
# Raise as ValueError for consistency with other coders' error handling
|
||||
raise ValueError(f"Error parsing patch content: {e}")
|
||||
except Exception as e:
|
||||
# Catch unexpected errors during parsing
|
||||
raise ValueError(f"Unexpected error parsing patch: {e}")
|
||||
|
||||
def _parse_patch_text(
|
||||
self, lines: List[str], start_index: int, current_files: Dict[str, str]
|
||||
) -> Patch:
|
||||
"""
|
||||
Parses patch content lines into a Patch object.
|
||||
Adapted from the Parser class in apply_patch.py.
|
||||
"""
|
||||
patch = Patch()
|
||||
index = start_index
|
||||
fuzz_accumulator = 0
|
||||
|
||||
while index < len(lines):
|
||||
line = lines[index]
|
||||
norm_line = _norm(line)
|
||||
|
||||
if norm_line == "*** End Patch":
|
||||
index += 1
|
||||
break # Successfully reached end
|
||||
|
||||
# ---------- UPDATE ---------- #
|
||||
if norm_line.startswith("*** Update File: "):
|
||||
path = norm_line[len("*** Update File: ") :].strip()
|
||||
index += 1
|
||||
if not path:
|
||||
raise DiffError("Update File action missing path.")
|
||||
|
||||
# Optional move target
|
||||
move_to = None
|
||||
if index < len(lines) and _norm(lines[index]).startswith("*** Move to: "):
|
||||
move_to = _norm(lines[index])[len("*** Move to: ") :].strip()
|
||||
index += 1
|
||||
if not move_to:
|
||||
raise DiffError("Move to action missing path.")
|
||||
|
||||
if path not in current_files:
|
||||
raise DiffError(f"Update File Error - missing file content for: {path}")
|
||||
|
||||
file_content = current_files[path]
|
||||
|
||||
existing_action = patch.actions.get(path)
|
||||
if existing_action is not None:
|
||||
# Merge additional UPDATE block into the existing one
|
||||
if existing_action.type != ActionType.UPDATE:
|
||||
raise DiffError(f"Conflicting actions for file: {path}")
|
||||
|
||||
new_action, index, fuzz = self._parse_update_file_sections(
|
||||
lines, index, file_content
|
||||
)
|
||||
existing_action.chunks.extend(new_action.chunks)
|
||||
|
||||
if move_to:
|
||||
if existing_action.move_path and existing_action.move_path != move_to:
|
||||
raise DiffError(f"Conflicting move targets for file: {path}")
|
||||
existing_action.move_path = move_to
|
||||
fuzz_accumulator += fuzz
|
||||
else:
|
||||
# First UPDATE block for this file
|
||||
action, index, fuzz = self._parse_update_file_sections(
|
||||
lines, index, file_content
|
||||
)
|
||||
action.path = path
|
||||
action.move_path = move_to
|
||||
patch.actions[path] = action
|
||||
fuzz_accumulator += fuzz
|
||||
continue
|
||||
|
||||
# ---------- DELETE ---------- #
|
||||
elif norm_line.startswith("*** Delete File: "):
|
||||
path = norm_line[len("*** Delete File: ") :].strip()
|
||||
index += 1
|
||||
if not path:
|
||||
raise DiffError("Delete File action missing path.")
|
||||
existing_action = patch.actions.get(path)
|
||||
if existing_action:
|
||||
if existing_action.type == ActionType.DELETE:
|
||||
# Duplicate delete – ignore the extra block
|
||||
self.io.tool_warning(f"Duplicate delete action for file: {path} ignored.")
|
||||
continue
|
||||
else:
|
||||
raise DiffError(f"Conflicting actions for file: {path}")
|
||||
if path not in current_files:
|
||||
raise DiffError(
|
||||
f"Delete File Error - file not found: {path}"
|
||||
) # Check against known files
|
||||
|
||||
patch.actions[path] = PatchAction(type=ActionType.DELETE, path=path)
|
||||
continue
|
||||
|
||||
# ---------- ADD ---------- #
|
||||
elif norm_line.startswith("*** Add File: "):
|
||||
path = norm_line[len("*** Add File: ") :].strip()
|
||||
index += 1
|
||||
if not path:
|
||||
raise DiffError("Add File action missing path.")
|
||||
if path in patch.actions:
|
||||
raise DiffError(f"Duplicate action for file: {path}")
|
||||
# Check if file exists in the context provided (should not for Add).
|
||||
# Note: We only have needed files, a full check requires FS access.
|
||||
# if path in current_files:
|
||||
# raise DiffError(f"Add File Error - file already exists: {path}")
|
||||
|
||||
action, index = self._parse_add_file_content(lines, index)
|
||||
action.path = path # Ensure path is set
|
||||
patch.actions[path] = action
|
||||
continue
|
||||
|
||||
# If we are here, the line is unexpected
|
||||
# Allow blank lines between actions
|
||||
if not norm_line.strip():
|
||||
index += 1
|
||||
continue
|
||||
|
||||
raise DiffError(f"Unknown or misplaced line while parsing patch: {line}")
|
||||
|
||||
# Check if we consumed the whole input or stopped early
|
||||
# Tolerate missing "*** End Patch" if we processed actions
|
||||
# if index < len(lines) and _norm(lines[index-1]) != "*** End Patch":
|
||||
# raise DiffError("Patch parsing finished unexpectedly before end of input.")
|
||||
|
||||
patch.fuzz = fuzz_accumulator
|
||||
return patch
|
||||
|
||||
def _parse_update_file_sections(
|
||||
self, lines: List[str], index: int, file_content: str
|
||||
) -> Tuple[PatchAction, int, int]:
|
||||
"""Parses all sections (@@, context, -, +) for a single Update File action."""
|
||||
action = PatchAction(type=ActionType.UPDATE, path="") # Path set by caller
|
||||
orig_lines = file_content.splitlines() # Use splitlines for consistency
|
||||
current_file_index = 0 # Track position in original file content
|
||||
total_fuzz = 0
|
||||
|
||||
while index < len(lines):
|
||||
norm_line = _norm(lines[index])
|
||||
# Check for terminators for *this* file update
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
)
|
||||
):
|
||||
break # End of this file's update section
|
||||
|
||||
# Handle @@ scope lines (optional)
|
||||
scope_lines = []
|
||||
while index < len(lines) and _norm(lines[index]).startswith("@@"):
|
||||
scope_line_content = lines[index][len("@@") :].strip()
|
||||
if scope_line_content: # Ignore empty @@ lines?
|
||||
scope_lines.append(scope_line_content)
|
||||
index += 1
|
||||
|
||||
# Find the scope in the original file if specified
|
||||
if scope_lines:
|
||||
# Simple scope finding: search from current position
|
||||
# A more robust finder could handle nested scopes like the reference @@ @@
|
||||
found_scope = False
|
||||
temp_index = current_file_index
|
||||
while temp_index < len(orig_lines):
|
||||
# Check if all scope lines match sequentially from temp_index
|
||||
match = True
|
||||
for i, scope in enumerate(scope_lines):
|
||||
if (
|
||||
temp_index + i >= len(orig_lines)
|
||||
or _norm(orig_lines[temp_index + i]).strip() != scope
|
||||
):
|
||||
match = False
|
||||
break
|
||||
if match:
|
||||
current_file_index = temp_index + len(scope_lines)
|
||||
found_scope = True
|
||||
break
|
||||
temp_index += 1
|
||||
|
||||
if not found_scope:
|
||||
# Try fuzzy scope matching (strip whitespace)
|
||||
temp_index = current_file_index
|
||||
while temp_index < len(orig_lines):
|
||||
match = True
|
||||
for i, scope in enumerate(scope_lines):
|
||||
if (
|
||||
temp_index + i >= len(orig_lines)
|
||||
or _norm(orig_lines[temp_index + i]).strip() != scope.strip()
|
||||
):
|
||||
match = False
|
||||
break
|
||||
if match:
|
||||
current_file_index = temp_index + len(scope_lines)
|
||||
found_scope = True
|
||||
total_fuzz += 1 # Add fuzz for scope match difference
|
||||
break
|
||||
temp_index += 1
|
||||
|
||||
if not found_scope:
|
||||
scope_txt = "\n".join(scope_lines)
|
||||
raise DiffError(f"Could not find scope context:\n{scope_txt}")
|
||||
|
||||
# Peek and parse the next context/change section
|
||||
context_block, chunks_in_section, next_index, is_eof = peek_next_section(lines, index)
|
||||
|
||||
# Find where this context block appears in the original file
|
||||
found_index, fuzz = find_context(orig_lines, context_block, current_file_index, is_eof)
|
||||
total_fuzz += fuzz
|
||||
|
||||
if found_index == -1:
|
||||
ctx_txt = "\n".join(context_block)
|
||||
marker = "*** End of File" if is_eof else ""
|
||||
raise DiffError(
|
||||
f"Could not find patch context {marker} starting near line"
|
||||
f" {current_file_index}:\n{ctx_txt}"
|
||||
)
|
||||
|
||||
# Adjust chunk original indices to be absolute within the file
|
||||
for chunk in chunks_in_section:
|
||||
# chunk.orig_index from peek is relative to context_block start
|
||||
# We need it relative to the file start
|
||||
chunk.orig_index += found_index
|
||||
action.chunks.append(chunk)
|
||||
|
||||
# Advance file index past the matched context block
|
||||
current_file_index = found_index + len(context_block)
|
||||
# Advance line index past the processed section in the patch
|
||||
index = next_index
|
||||
|
||||
return action, index, total_fuzz
|
||||
|
||||
def _parse_add_file_content(self, lines: List[str], index: int) -> Tuple[PatchAction, int]:
|
||||
"""Parses the content (+) lines for an Add File action."""
|
||||
added_lines: List[str] = []
|
||||
while index < len(lines):
|
||||
line = lines[index]
|
||||
norm_line = _norm(line)
|
||||
# Stop if we hit another action or end marker
|
||||
if norm_line.startswith(
|
||||
(
|
||||
"*** End Patch",
|
||||
"*** Update File:",
|
||||
"*** Delete File:",
|
||||
"*** Add File:",
|
||||
)
|
||||
):
|
||||
break
|
||||
|
||||
# Expect lines to start with '+'
|
||||
if not line.startswith("+"):
|
||||
# Tolerate blank lines? Or require '+'? Reference implies '+' required.
|
||||
if norm_line.strip() == "":
|
||||
# Treat blank line as adding a blank line
|
||||
added_lines.append("")
|
||||
else:
|
||||
raise DiffError(f"Invalid Add File line (missing '+'): {line}")
|
||||
else:
|
||||
added_lines.append(line[1:]) # Strip leading '+'
|
||||
|
||||
index += 1
|
||||
|
||||
action = PatchAction(type=ActionType.ADD, path="", new_content="\n".join(added_lines))
|
||||
return action, index
|
||||
|
||||
def apply_edits(self, edits: List[PatchAction]):
|
||||
"""
|
||||
Applies the parsed PatchActions to the corresponding files.
|
||||
"""
|
||||
if not edits:
|
||||
return
|
||||
|
||||
# Group edits by original path? Not strictly needed if processed sequentially.
|
||||
|
||||
# Edits are now List[Tuple[str, PatchAction]]
|
||||
for _path_tuple_element, action in edits:
|
||||
# action is the PatchAction object
|
||||
# action.path is the canonical path within the action logic
|
||||
full_path = self.abs_root_path(action.path)
|
||||
path_obj = pathlib.Path(full_path)
|
||||
|
||||
try:
|
||||
if action.type == ActionType.ADD:
|
||||
# Check existence *before* writing
|
||||
if path_obj.exists():
|
||||
raise DiffError(f"ADD Error: File already exists: {action.path}")
|
||||
if action.new_content is None:
|
||||
# Parser should ensure this doesn't happen
|
||||
raise DiffError(f"ADD change for {action.path} has no content")
|
||||
|
||||
self.io.tool_output(f"Adding {action.path}")
|
||||
path_obj.parent.mkdir(parents=True, exist_ok=True)
|
||||
# Ensure single trailing newline, matching reference behavior
|
||||
content_to_write = action.new_content
|
||||
if not content_to_write.endswith("\n"):
|
||||
content_to_write += "\n"
|
||||
self.io.write_text(full_path, content_to_write)
|
||||
|
||||
elif action.type == ActionType.DELETE:
|
||||
self.io.tool_output(f"Deleting {action.path}")
|
||||
if not path_obj.exists():
|
||||
self.io.tool_warning(
|
||||
f"DELETE Warning: File not found, skipping: {action.path}"
|
||||
)
|
||||
else:
|
||||
path_obj.unlink()
|
||||
|
||||
elif action.type == ActionType.UPDATE:
|
||||
if not path_obj.exists():
|
||||
raise DiffError(f"UPDATE Error: File does not exist: {action.path}")
|
||||
|
||||
current_content = self.io.read_text(full_path)
|
||||
if current_content is None:
|
||||
# Should have been caught during parsing if file was needed
|
||||
raise DiffError(f"Could not read file for UPDATE: {action.path}")
|
||||
|
||||
# Apply the update logic using the parsed chunks
|
||||
new_content = self._apply_update(current_content, action, action.path)
|
||||
|
||||
target_full_path = (
|
||||
self.abs_root_path(action.move_path) if action.move_path else full_path
|
||||
)
|
||||
target_path_obj = pathlib.Path(target_full_path)
|
||||
|
||||
if action.move_path:
|
||||
self.io.tool_output(
|
||||
f"Updating and moving {action.path} to {action.move_path}"
|
||||
)
|
||||
# Check if target exists before overwriting/moving
|
||||
if target_path_obj.exists() and full_path != target_full_path:
|
||||
self.io.tool_warning(
|
||||
"UPDATE Warning: Target file for move already exists, overwriting:"
|
||||
f" {action.move_path}"
|
||||
)
|
||||
else:
|
||||
self.io.tool_output(f"Updating {action.path}")
|
||||
|
||||
# Ensure parent directory exists for target
|
||||
target_path_obj.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.io.write_text(target_full_path, new_content)
|
||||
|
||||
# Remove original file *after* successful write to new location if moved
|
||||
if action.move_path and full_path != target_full_path:
|
||||
path_obj.unlink()
|
||||
|
||||
else:
|
||||
# Should not happen
|
||||
raise DiffError(f"Unknown action type encountered: {action.type}")
|
||||
|
||||
except (DiffError, FileNotFoundError, IOError, OSError) as e:
|
||||
# Raise a ValueError to signal failure, consistent with other coders.
|
||||
raise ValueError(f"Error applying action '{action.type}' to {action.path}: {e}")
|
||||
except Exception as e:
|
||||
# Catch unexpected errors during application
|
||||
raise ValueError(
|
||||
f"Unexpected error applying action '{action.type}' to {action.path}: {e}"
|
||||
)
|
||||
|
||||
def _apply_update(self, text: str, action: PatchAction, path: str) -> str:
|
||||
"""
|
||||
Applies UPDATE chunks to the given text content.
|
||||
Adapted from _get_updated_file in apply_patch.py.
|
||||
"""
|
||||
if action.type is not ActionType.UPDATE:
|
||||
# Should not be called otherwise, but check for safety
|
||||
raise DiffError("_apply_update called with non-update action")
|
||||
|
||||
orig_lines = text.splitlines() # Use splitlines to handle endings consistently
|
||||
dest_lines: List[str] = []
|
||||
current_orig_line_idx = 0 # Tracks index in orig_lines processed so far
|
||||
|
||||
# Sort chunks by their original index to apply them sequentially
|
||||
sorted_chunks = sorted(action.chunks, key=lambda c: c.orig_index)
|
||||
|
||||
for chunk in sorted_chunks:
|
||||
# chunk.orig_index is the absolute line number where the change starts
|
||||
# (where the first deleted line was, or where inserted lines go if no deletes)
|
||||
chunk_start_index = chunk.orig_index
|
||||
|
||||
if chunk_start_index < current_orig_line_idx:
|
||||
# This indicates overlapping chunks or incorrect indices from parsing
|
||||
raise DiffError(
|
||||
f"{path}: Overlapping or out-of-order chunk detected."
|
||||
f" Current index {current_orig_line_idx}, chunk starts at {chunk_start_index}."
|
||||
)
|
||||
|
||||
# Add lines from original file between the last chunk and this one
|
||||
dest_lines.extend(orig_lines[current_orig_line_idx:chunk_start_index])
|
||||
|
||||
# Verify that the lines to be deleted actually match the original file content
|
||||
# (The parser should have used find_context, but double-check here)
|
||||
num_del = len(chunk.del_lines)
|
||||
actual_deleted_lines = orig_lines[chunk_start_index : chunk_start_index + num_del]
|
||||
|
||||
# Use the same normalization as find_context_core for comparison robustness
|
||||
norm_chunk_del = [_norm(s).strip() for s in chunk.del_lines]
|
||||
norm_actual_del = [_norm(s).strip() for s in actual_deleted_lines]
|
||||
|
||||
if norm_chunk_del != norm_actual_del:
|
||||
# This indicates the context matching failed or the file changed since parsing
|
||||
# Provide detailed error message
|
||||
expected_str = "\n".join(f"- {s}" for s in chunk.del_lines)
|
||||
actual_str = "\n".join(f" {s}" for s in actual_deleted_lines)
|
||||
raise DiffError(
|
||||
f"{path}: Mismatch applying patch near line {chunk_start_index + 1}.\n"
|
||||
f"Expected lines to remove:\n{expected_str}\n"
|
||||
f"Found lines in file:\n{actual_str}"
|
||||
)
|
||||
|
||||
# Add the inserted lines from the chunk
|
||||
dest_lines.extend(chunk.ins_lines)
|
||||
|
||||
# Advance the original line index past the lines processed (deleted lines)
|
||||
current_orig_line_idx = chunk_start_index + num_del
|
||||
|
||||
# Add any remaining lines from the original file after the last chunk
|
||||
dest_lines.extend(orig_lines[current_orig_line_idx:])
|
||||
|
||||
# Join lines and ensure a single trailing newline
|
||||
result = "\n".join(dest_lines)
|
||||
if result or orig_lines: # Add newline unless result is empty and original was empty
|
||||
result += "\n"
|
||||
return result
|
||||
161
aider/coders/patch_prompts.py
Normal file
161
aider/coders/patch_prompts.py
Normal file
@@ -0,0 +1,161 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from .base_prompts import CoderPrompts
|
||||
from .editblock_prompts import EditBlockPrompts
|
||||
|
||||
|
||||
class PatchPrompts(EditBlockPrompts):
|
||||
# --------------------------------------------------------------------- #
|
||||
# SYSTEM PROMPT
|
||||
# --------------------------------------------------------------------- #
|
||||
main_system = """Act as an expert software developer.
|
||||
Always use best practices when coding.
|
||||
Respect and use existing conventions, libraries, etc that are already present in the code base.
|
||||
{final_reminders}
|
||||
Take requests for changes to the supplied code.
|
||||
If the request is ambiguous, ask questions.
|
||||
|
||||
Always reply to the user in {language}.
|
||||
|
||||
Once you understand the request you MUST:
|
||||
|
||||
1. Decide if you need to propose edits to any files that haven't been added to the chat. You can create new files without asking!
|
||||
|
||||
• If you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.
|
||||
• End your reply and wait for their approval.
|
||||
• You can keep asking if you then decide you need to edit more files.
|
||||
|
||||
2. Think step‑by‑step and explain the needed changes in a few short sentences.
|
||||
|
||||
3. Describe the changes using the V4A diff format, enclosed within `*** Begin Patch` and `*** End Patch` markers.
|
||||
|
||||
IMPORTANT: Each file MUST appear only once in the patch.
|
||||
Consolidate **all** edits for a given file into a single `*** [ACTION] File:` block.
|
||||
{shell_cmd_prompt}
|
||||
"""
|
||||
|
||||
# --------------------------------------------------------------------- #
|
||||
# EXAMPLE MESSAGES
|
||||
# --------------------------------------------------------------------- #
|
||||
example_messages = [
|
||||
# --- Example 1 ---------------------------------------------------- #
|
||||
dict(
|
||||
role="user",
|
||||
content="Change get_factorial() to use math.factorial",
|
||||
),
|
||||
dict(
|
||||
role="assistant",
|
||||
content="""To make this change we need to modify `mathweb/flask/app.py`:
|
||||
|
||||
1. Import the `math` module.
|
||||
2. Replace the custom recursive `factorial()` helper with `math.factorial`.
|
||||
3. Update `get_factorial()` to call `math.factorial`.
|
||||
|
||||
Here is the patch:
|
||||
|
||||
*** Begin Patch
|
||||
*** Update File: mathweb/flask/app.py
|
||||
@@
|
||||
-from flask import Flask
|
||||
-
|
||||
-from flask import Flask
|
||||
+from flask import Flask
|
||||
+import math
|
||||
@@
|
||||
-def factorial(n):
|
||||
- "compute factorial"
|
||||
-
|
||||
- if n == 0:
|
||||
- return 1
|
||||
- else:
|
||||
- return n * factorial(n-1)
|
||||
+def factorial(n):
|
||||
+ "compute factorial"
|
||||
+
|
||||
+ return math.factorial(n)
|
||||
@@ def get_factorial(self, n):
|
||||
- return str(factorial(n))
|
||||
+ return str(math.factorial(n))
|
||||
*** End Patch
|
||||
""",
|
||||
),
|
||||
# --- Example 2 ---------------------------------------------------- #
|
||||
dict(
|
||||
role="user",
|
||||
content="Refactor hello() into its own file.",
|
||||
),
|
||||
dict(
|
||||
role="assistant",
|
||||
content="""To make this change we need to modify `main.py` and create a new file `hello.py`:
|
||||
|
||||
1. Create `hello.py` with the `hello()` function.
|
||||
2. Remove `hello()` from `main.py` and import it instead.
|
||||
|
||||
Here is the patch:
|
||||
|
||||
*** Begin Patch
|
||||
*** Add File: hello.py
|
||||
+# A simple function
|
||||
+def hello():
|
||||
+ "print a greeting"
|
||||
+
|
||||
+ print("hello")
|
||||
*** Update File: main.py
|
||||
@@
|
||||
-def hello():
|
||||
- "print a greeting"
|
||||
-
|
||||
- print("hello")
|
||||
+from hello import hello
|
||||
*** End Patch
|
||||
""",
|
||||
),
|
||||
]
|
||||
|
||||
# --------------------------------------------------------------------- #
|
||||
# SYSTEM REMINDER
|
||||
# --------------------------------------------------------------------- #
|
||||
system_reminder = """# V4A Diff Format Rules:
|
||||
|
||||
Your entire response containing the patch MUST start with `*** Begin Patch` on a line by itself.
|
||||
Your entire response containing the patch MUST end with `*** End Patch` on a line by itself.
|
||||
|
||||
Use the *FULL* file path, as shown to you by the user.
|
||||
{quad_backtick_reminder}
|
||||
|
||||
For each file you need to modify, start with a marker line:
|
||||
|
||||
*** [ACTION] File: [path/to/file]
|
||||
|
||||
Where `[ACTION]` is one of `Add`, `Update`, or `Delete`.
|
||||
|
||||
⇨ **Each file MUST appear only once in the patch.**
|
||||
Consolidate all changes for that file into the same block.
|
||||
If you are moving code within a file, include both the deletions and the
|
||||
insertions as separate hunks inside this single `*** Update File:` block
|
||||
(do *not* open a second block for the same file).
|
||||
|
||||
For `Update` actions, describe each snippet of code that needs to be changed using the following format:
|
||||
1. Context lines: Include 3 lines of context *before* the change. These lines MUST start with a single space ` `.
|
||||
2. Lines to remove: Precede each line to be removed with a minus sign `-`.
|
||||
3. Lines to add: Precede each line to be added with a plus sign `+`.
|
||||
4. Context lines: Include 3 lines of context *after* the change. These lines MUST start with a single space ` `.
|
||||
|
||||
Context lines MUST exactly match the existing file content, character for character, including indentation.
|
||||
If a change is near the beginning or end of the file, include fewer than 3 context lines as appropriate.
|
||||
If 3 lines of context is insufficient to uniquely identify the snippet, use `@@ [CLASS_OR_FUNCTION_NAME]` markers on their own lines *before* the context lines to specify the scope. You can use multiple `@@` markers if needed.
|
||||
Do not include line numbers.
|
||||
|
||||
Only create patches for files that the user has added to the chat!
|
||||
|
||||
When moving code *within* a single file, keep everything inside one
|
||||
`*** Update File:` block. Provide one hunk that deletes the code from its
|
||||
original location and another hunk that inserts it at the new location.
|
||||
|
||||
For `Add` actions, use the `*** Add File: [path/to/new/file]` marker, followed by the lines of the new file, each preceded by a plus sign `+`.
|
||||
|
||||
For `Delete` actions, use the `*** Delete File: [path/to/file]` marker. No other lines are needed for the deletion.
|
||||
|
||||
{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN THE SPECIFIED V4A DIFF FORMAT!
|
||||
{shell_cmd_reminder}
|
||||
"""
|
||||
@@ -235,20 +235,6 @@ Left
|
||||
Left
|
||||
"""
|
||||
|
||||
"""
|
||||
ri = RelativeIndenter([example])
|
||||
dump(example)
|
||||
|
||||
rel_example = ri.make_relative(example)
|
||||
dump(repr(rel_example))
|
||||
|
||||
abs_example = ri.make_absolute(rel_example)
|
||||
dump(abs_example)
|
||||
|
||||
|
||||
sys.exit()
|
||||
"""
|
||||
|
||||
|
||||
def relative_indent(texts):
|
||||
ri = RelativeIndenter(texts)
|
||||
@@ -349,7 +335,7 @@ def lines_to_chars(lines, mapping):
|
||||
return new_text
|
||||
|
||||
|
||||
def dmp_lines_apply(texts, remap=True):
|
||||
def dmp_lines_apply(texts):
|
||||
debug = False
|
||||
# debug = True
|
||||
|
||||
@@ -655,8 +641,6 @@ def proc(dname):
|
||||
(dmp_lines_apply, all_preprocs),
|
||||
]
|
||||
|
||||
_strategies = editblock_strategies # noqa: F841
|
||||
|
||||
short_names = dict(
|
||||
search_and_replace="sr",
|
||||
git_cherry_pick_osr_onto_o="cp_o",
|
||||
|
||||
37
aider/coders/shell.py
Normal file
37
aider/coders/shell.py
Normal file
@@ -0,0 +1,37 @@
|
||||
shell_cmd_prompt = """
|
||||
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
|
||||
|
||||
Just suggest shell commands this way, not example code.
|
||||
Only suggest complete shell commands that are ready to execute, without placeholders.
|
||||
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
|
||||
Do not suggest multi-line shell commands.
|
||||
All shell commands will run from the root directory of the user's project.
|
||||
|
||||
Use the appropriate shell based on the user's system info:
|
||||
{platform}
|
||||
Examples of when to suggest shell commands:
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
""" # noqa
|
||||
|
||||
no_shell_cmd_prompt = """
|
||||
Keep in mind these details about the user's platform and environment:
|
||||
{platform}
|
||||
""" # noqa
|
||||
|
||||
shell_cmd_reminder = """
|
||||
Examples of when to suggest shell commands:
|
||||
|
||||
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
|
||||
- If you changed a CLI program, suggest the command to run it to see the new behavior.
|
||||
- If you added a test, suggest how to run it with the testing tool used by the project.
|
||||
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
|
||||
- If your code changes add new dependencies, suggest the command to install them.
|
||||
- Etc.
|
||||
|
||||
""" # noqa
|
||||
@@ -45,6 +45,7 @@ other_hunks_applied = (
|
||||
|
||||
class UnifiedDiffCoder(Coder):
|
||||
"""A coder that uses unified diff format for code modifications."""
|
||||
|
||||
edit_format = "udiff"
|
||||
gpt_prompts = UnifiedDiffPrompts()
|
||||
|
||||
@@ -344,7 +345,16 @@ def process_fenced_block(lines, start_line_num):
|
||||
|
||||
if block[0].startswith("--- ") and block[1].startswith("+++ "):
|
||||
# Extract the file path, considering that it might contain spaces
|
||||
fname = block[1][4:].strip()
|
||||
a_fname = block[0][4:].strip()
|
||||
b_fname = block[1][4:].strip()
|
||||
|
||||
# Check if standard git diff prefixes are present (or /dev/null) and strip them
|
||||
if (a_fname.startswith("a/") or a_fname == "/dev/null") and b_fname.startswith("b/"):
|
||||
fname = b_fname[2:]
|
||||
else:
|
||||
# Otherwise, assume the path is as intended
|
||||
fname = b_fname
|
||||
|
||||
block = block[2:]
|
||||
else:
|
||||
fname = None
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# flake8: noqa: E501
|
||||
|
||||
from . import shell
|
||||
from .base_prompts import CoderPrompts
|
||||
|
||||
|
||||
class UnifiedDiffPrompts(CoderPrompts):
|
||||
main_system = """Act as an expert software developer.
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Always use best practices when coding.
|
||||
Respect and use existing conventions, libraries, etc that are already present in the code base.
|
||||
|
||||
@@ -106,5 +107,9 @@ To move code within a file, use 2 hunks: 1 to delete it from its current locatio
|
||||
|
||||
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
"""
|
||||
|
||||
shell_cmd_prompt = shell.shell_cmd_prompt
|
||||
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
|
||||
shell_cmd_reminder = shell.shell_cmd_reminder
|
||||
|
||||
14
aider/coders/udiff_simple.py
Normal file
14
aider/coders/udiff_simple.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from .udiff_coder import UnifiedDiffCoder
|
||||
from .udiff_simple_prompts import UnifiedDiffSimplePrompts
|
||||
|
||||
|
||||
class UnifiedDiffSimpleCoder(UnifiedDiffCoder):
|
||||
"""
|
||||
A coder that uses unified diff format for code modifications.
|
||||
This variant uses a simpler prompt that doesn't mention specific
|
||||
diff rules like using `@@ ... @@` lines or avoiding line numbers.
|
||||
"""
|
||||
|
||||
edit_format = "udiff-simple"
|
||||
|
||||
gpt_prompts = UnifiedDiffSimplePrompts()
|
||||
25
aider/coders/udiff_simple_prompts.py
Normal file
25
aider/coders/udiff_simple_prompts.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from .udiff_prompts import UnifiedDiffPrompts
|
||||
|
||||
|
||||
class UnifiedDiffSimplePrompts(UnifiedDiffPrompts):
|
||||
"""
|
||||
Prompts for the UnifiedDiffSimpleCoder.
|
||||
Inherits from UnifiedDiffPrompts and can override specific prompts
|
||||
if a simpler wording is desired for this edit format.
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
|
||||
system_reminder = """# File editing rules:
|
||||
|
||||
Return edits similar to unified diffs that `diff -U0` would produce.
|
||||
|
||||
The user's patch tool needs CORRECT patches that apply cleanly against the current contents of the file!
|
||||
Think carefully and make sure you include and mark all lines that need to be removed or changed as `-` lines.
|
||||
Make sure you mark all new or modified lines with `+`.
|
||||
Don't leave out any lines or the diff patch won't apply correctly.
|
||||
|
||||
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
|
||||
|
||||
{final_reminders}
|
||||
""" # noqa
|
||||
@@ -10,7 +10,7 @@ If the request is ambiguous, ask questions.
|
||||
|
||||
Always reply to the user in {language}.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
Once you understand the request you MUST:
|
||||
1. Determine if any code changes are needed.
|
||||
2. Explain any needed changes.
|
||||
@@ -61,7 +61,7 @@ To suggest changes to a file you MUST return a *file listing* that contains the
|
||||
*NEVER* skip, omit or elide content from a *file listing* using "..." or by adding comments like "... rest of code..."!
|
||||
Create a new file you MUST return a *file listing* which includes an appropriate filename, including any appropriate path.
|
||||
|
||||
{lazy_prompt}
|
||||
{final_reminders}
|
||||
"""
|
||||
|
||||
redacted_edit_message = "No changes are needed."
|
||||
|
||||
@@ -17,6 +17,7 @@ from aider import models, prompts, voice
|
||||
from aider.editor import pipe_editor
|
||||
from aider.format_settings import format_settings
|
||||
from aider.help import Help, install_help_extra
|
||||
from aider.io import CommandCompletionException
|
||||
from aider.llm import litellm
|
||||
from aider.repo import ANY_GIT_ERROR
|
||||
from aider.run_cmd import run_cmd
|
||||
@@ -27,8 +28,9 @@ from .dump import dump # noqa: F401
|
||||
|
||||
|
||||
class SwitchCoder(Exception):
|
||||
def __init__(self, **kwargs):
|
||||
def __init__(self, placeholder=None, **kwargs):
|
||||
self.kwargs = kwargs
|
||||
self.placeholder = placeholder
|
||||
|
||||
|
||||
class Commands:
|
||||
@@ -45,6 +47,7 @@ class Commands:
|
||||
parser=self.parser,
|
||||
verbose=self.verbose,
|
||||
editor=self.editor,
|
||||
original_read_only_fnames=self.original_read_only_fnames,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
@@ -82,10 +85,48 @@ class Commands:
|
||||
self.original_read_only_fnames = set(original_read_only_fnames or [])
|
||||
|
||||
def cmd_model(self, args):
|
||||
"Switch to a new LLM"
|
||||
"Switch the Main Model to a new LLM"
|
||||
|
||||
model_name = args.strip()
|
||||
model = models.Model(model_name, weak_model=self.coder.main_model.weak_model.name)
|
||||
model = models.Model(
|
||||
model_name,
|
||||
editor_model=self.coder.main_model.editor_model.name,
|
||||
weak_model=self.coder.main_model.weak_model.name,
|
||||
)
|
||||
models.sanity_check_models(self.io, model)
|
||||
|
||||
# Check if the current edit format is the default for the old model
|
||||
old_model_edit_format = self.coder.main_model.edit_format
|
||||
current_edit_format = self.coder.edit_format
|
||||
|
||||
new_edit_format = current_edit_format
|
||||
if current_edit_format == old_model_edit_format:
|
||||
# If the user was using the old model's default, switch to the new model's default
|
||||
new_edit_format = model.edit_format
|
||||
|
||||
raise SwitchCoder(main_model=model, edit_format=new_edit_format)
|
||||
|
||||
def cmd_editor_model(self, args):
|
||||
"Switch the Editor Model to a new LLM"
|
||||
|
||||
model_name = args.strip()
|
||||
model = models.Model(
|
||||
self.coder.main_model.name,
|
||||
editor_model=model_name,
|
||||
weak_model=self.coder.main_model.weak_model.name,
|
||||
)
|
||||
models.sanity_check_models(self.io, model)
|
||||
raise SwitchCoder(main_model=model)
|
||||
|
||||
def cmd_weak_model(self, args):
|
||||
"Switch the Weak Model to a new LLM"
|
||||
|
||||
model_name = args.strip()
|
||||
model = models.Model(
|
||||
self.coder.main_model.name,
|
||||
editor_model=self.coder.main_model.editor_model.name,
|
||||
weak_model=model_name,
|
||||
)
|
||||
models.sanity_check_models(self.io, model)
|
||||
raise SwitchCoder(main_model=model)
|
||||
|
||||
@@ -118,6 +159,10 @@ class Commands:
|
||||
" them."
|
||||
),
|
||||
),
|
||||
(
|
||||
"context",
|
||||
"Automatically identify which files will need to be edited.",
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -176,12 +221,18 @@ class Commands:
|
||||
|
||||
self.io.tool_output(f"Scraping {url}...")
|
||||
if not self.scraper:
|
||||
res = install_playwright(self.io)
|
||||
if not res:
|
||||
self.io.tool_warning("Unable to initialize playwright.")
|
||||
disable_playwright = getattr(self.args, "disable_playwright", False)
|
||||
if disable_playwright:
|
||||
res = False
|
||||
else:
|
||||
res = install_playwright(self.io)
|
||||
if not res:
|
||||
self.io.tool_warning("Unable to initialize playwright.")
|
||||
|
||||
self.scraper = Scraper(
|
||||
print_error=self.io.tool_error, playwright_available=res, verify_ssl=self.verify_ssl
|
||||
print_error=self.io.tool_error,
|
||||
playwright_available=res,
|
||||
verify_ssl=self.verify_ssl,
|
||||
)
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
@@ -295,7 +346,7 @@ class Commands:
|
||||
return
|
||||
|
||||
commit_message = args.strip() if args else None
|
||||
self.coder.repo.commit(message=commit_message)
|
||||
self.coder.repo.commit(message=commit_message, coder=self.coder)
|
||||
|
||||
def cmd_lint(self, args="", fnames=None):
|
||||
"Lint and fix in-chat files or all dirty files if none in chat"
|
||||
@@ -970,9 +1021,15 @@ class Commands:
|
||||
dict(role="assistant", content="Ok."),
|
||||
]
|
||||
|
||||
if add and exit_status != 0:
|
||||
if add_on_nonzero_exit and exit_status != 0:
|
||||
# Return the formatted output message for test failures
|
||||
return msg
|
||||
elif add and exit_status != 0:
|
||||
self.io.placeholder = "What's wrong? Fix"
|
||||
|
||||
# Return None if output wasn't added or command succeeded
|
||||
return None
|
||||
|
||||
def cmd_exit(self, args):
|
||||
"Exit the application"
|
||||
self.coder.event("exit", reason="/exit")
|
||||
@@ -1088,6 +1145,18 @@ class Commands:
|
||||
show_announcements=False,
|
||||
)
|
||||
|
||||
def completions_ask(self):
|
||||
raise CommandCompletionException()
|
||||
|
||||
def completions_code(self):
|
||||
raise CommandCompletionException()
|
||||
|
||||
def completions_architect(self):
|
||||
raise CommandCompletionException()
|
||||
|
||||
def completions_context(self):
|
||||
raise CommandCompletionException()
|
||||
|
||||
def cmd_ask(self, args):
|
||||
"""Ask questions about the code base without editing any files. If no prompt provided, switches to ask mode.""" # noqa
|
||||
return self._generic_chat_command(args, "ask")
|
||||
@@ -1100,7 +1169,11 @@ class Commands:
|
||||
"""Enter architect/editor mode using 2 different models. If no prompt provided, switches to architect/editor mode.""" # noqa
|
||||
return self._generic_chat_command(args, "architect")
|
||||
|
||||
def _generic_chat_command(self, args, edit_format):
|
||||
def cmd_context(self, args):
|
||||
"""Enter context mode to see surrounding code context. If no prompt provided, switches to context mode.""" # noqa
|
||||
return self._generic_chat_command(args, "context", placeholder=args.strip() or None)
|
||||
|
||||
def _generic_chat_command(self, args, edit_format, placeholder=None):
|
||||
if not args.strip():
|
||||
# Switch to the corresponding chat mode if no args provided
|
||||
return self.cmd_chat_mode(edit_format)
|
||||
@@ -1117,11 +1190,13 @@ class Commands:
|
||||
user_msg = args
|
||||
coder.run(user_msg)
|
||||
|
||||
# Use the provided placeholder if any
|
||||
raise SwitchCoder(
|
||||
edit_format=self.coder.edit_format,
|
||||
summarize_from_coder=False,
|
||||
from_coder=coder,
|
||||
show_announcements=False,
|
||||
placeholder=placeholder,
|
||||
)
|
||||
|
||||
def get_help_md(self):
|
||||
@@ -1317,7 +1392,30 @@ class Commands:
|
||||
"Print out the current settings"
|
||||
settings = format_settings(self.parser, self.args)
|
||||
announcements = "\n".join(self.coder.get_announcements())
|
||||
|
||||
# Build metadata for the active models (main, editor, weak)
|
||||
model_sections = []
|
||||
active_models = [
|
||||
("Main model", self.coder.main_model),
|
||||
("Editor model", getattr(self.coder.main_model, "editor_model", None)),
|
||||
("Weak model", getattr(self.coder.main_model, "weak_model", None)),
|
||||
]
|
||||
for label, model in active_models:
|
||||
if not model:
|
||||
continue
|
||||
info = getattr(model, "info", {}) or {}
|
||||
if not info:
|
||||
continue
|
||||
model_sections.append(f"{label} ({model.name}):")
|
||||
for k, v in sorted(info.items()):
|
||||
model_sections.append(f" {k}: {v}")
|
||||
model_sections.append("") # blank line between models
|
||||
|
||||
model_metadata = "\n".join(model_sections)
|
||||
|
||||
output = f"{announcements}\n{settings}"
|
||||
if model_metadata:
|
||||
output += "\n" + model_metadata
|
||||
self.io.tool_output(output)
|
||||
|
||||
def completions_raw_load(self, document, complete_event):
|
||||
@@ -1434,17 +1532,21 @@ class Commands:
|
||||
if user_input.strip():
|
||||
self.io.set_placeholder(user_input.rstrip())
|
||||
|
||||
def cmd_edit(self, args=""):
|
||||
"Alias for /editor: Open an editor to write a prompt"
|
||||
return self.cmd_editor(args)
|
||||
|
||||
def cmd_think_tokens(self, args):
|
||||
"Set the thinking token budget (supports formats like 8096, 8k, 10.5k, 0.5M)"
|
||||
model = self.coder.main_model
|
||||
|
||||
if not args.strip():
|
||||
# Display current value if no args are provided
|
||||
formatted_budget = model.get_thinking_tokens(model)
|
||||
formatted_budget = model.get_thinking_tokens()
|
||||
if formatted_budget is None:
|
||||
self.io.tool_output("Thinking tokens are not currently set.")
|
||||
else:
|
||||
budget = model.extra_params["thinking"].get("budget_tokens")
|
||||
budget = model.get_raw_thinking_tokens()
|
||||
self.io.tool_output(
|
||||
f"Current thinking token budget: {budget:,} tokens ({formatted_budget})."
|
||||
)
|
||||
@@ -1453,8 +1555,8 @@ class Commands:
|
||||
value = args.strip()
|
||||
model.set_thinking_tokens(value)
|
||||
|
||||
formatted_budget = model.get_thinking_tokens(model)
|
||||
budget = model.extra_params["thinking"].get("budget_tokens")
|
||||
formatted_budget = model.get_thinking_tokens()
|
||||
budget = model.get_raw_thinking_tokens()
|
||||
|
||||
self.io.tool_output(f"Set thinking token budget to {budget:,} tokens ({formatted_budget}).")
|
||||
self.io.tool_output()
|
||||
@@ -1469,7 +1571,7 @@ class Commands:
|
||||
|
||||
if not args.strip():
|
||||
# Display current value if no args are provided
|
||||
reasoning_value = model.get_reasoning_effort(model)
|
||||
reasoning_value = model.get_reasoning_effort()
|
||||
if reasoning_value is None:
|
||||
self.io.tool_output("Reasoning effort is not currently set.")
|
||||
else:
|
||||
@@ -1478,7 +1580,7 @@ class Commands:
|
||||
|
||||
value = args.strip()
|
||||
model.set_reasoning_effort(value)
|
||||
reasoning_value = model.get_reasoning_effort(model)
|
||||
reasoning_value = model.get_reasoning_effort()
|
||||
self.io.tool_output(f"Set reasoning effort to {reasoning_value}")
|
||||
self.io.tool_output()
|
||||
|
||||
|
||||
@@ -83,4 +83,25 @@ class LiteLLMExceptions:
|
||||
)
|
||||
if "boto3" in str(ex):
|
||||
return ExInfo("APIConnectionError", False, "You need to: pip install boto3")
|
||||
if "OpenrouterException" in str(ex) and "'choices'" in str(ex):
|
||||
return ExInfo(
|
||||
"APIConnectionError",
|
||||
True,
|
||||
(
|
||||
"OpenRouter or the upstream API provider is down, overloaded or rate"
|
||||
" limiting your requests."
|
||||
),
|
||||
)
|
||||
|
||||
# Check for specific non-retryable APIError cases like insufficient credits
|
||||
if ex.__class__ is litellm.APIError:
|
||||
err_str = str(ex).lower()
|
||||
if "insufficient credits" in err_str and '"code":402' in err_str:
|
||||
return ExInfo(
|
||||
"APIError",
|
||||
False,
|
||||
"Insufficient credits with the API provider. Please add credits.",
|
||||
)
|
||||
# Fall through to default APIError handling if not the specific credits error
|
||||
|
||||
return self.exceptions.get(ex.__class__, ExInfo(None, None, None))
|
||||
|
||||
@@ -11,7 +11,7 @@ from aider.coders import Coder
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.io import InputOutput
|
||||
from aider.main import main as cli_main
|
||||
from aider.scrape import Scraper
|
||||
from aider.scrape import Scraper, has_playwright
|
||||
|
||||
|
||||
class CaptureIO(InputOutput):
|
||||
@@ -484,7 +484,7 @@ class GUI:
|
||||
url = self.web_content
|
||||
|
||||
if not self.state.scraper:
|
||||
self.scraper = Scraper(print_error=self.info)
|
||||
self.scraper = Scraper(print_error=self.info, playwright_available=has_playwright())
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
if content.strip():
|
||||
|
||||
@@ -10,4 +10,10 @@ exclude_website_pats = [
|
||||
"docs/unified-diffs.md",
|
||||
"docs/leaderboards/index.md",
|
||||
"assets/**",
|
||||
".jekyll-metadata",
|
||||
"Gemfile.lock",
|
||||
"Gemfile",
|
||||
"_config.yml",
|
||||
"**/OLD/**",
|
||||
"OLD/**",
|
||||
]
|
||||
|
||||
101
aider/io.py
101
aider/io.py
@@ -18,6 +18,7 @@ from prompt_toolkit.enums import EditingMode
|
||||
from prompt_toolkit.filters import Condition, is_searching
|
||||
from prompt_toolkit.history import FileHistory
|
||||
from prompt_toolkit.key_binding import KeyBindings
|
||||
from prompt_toolkit.key_binding.vi_state import InputMode
|
||||
from prompt_toolkit.keys import Keys
|
||||
from prompt_toolkit.lexers import PygmentsLexer
|
||||
from prompt_toolkit.output.vt100 import is_dumb_terminal
|
||||
@@ -25,6 +26,7 @@ from prompt_toolkit.shortcuts import CompleteStyle, PromptSession
|
||||
from prompt_toolkit.styles import Style
|
||||
from pygments.lexers import MarkdownLexer, guess_lexer_for_filename
|
||||
from pygments.token import Token
|
||||
from rich.color import ColorParseError
|
||||
from rich.columns import Columns
|
||||
from rich.console import Console
|
||||
from rich.markdown import Markdown
|
||||
@@ -34,6 +36,7 @@ from rich.text import Text
|
||||
from aider.mdstream import MarkdownStream
|
||||
|
||||
from .dump import dump # noqa: F401
|
||||
from .editor import pipe_editor
|
||||
from .utils import is_image_file
|
||||
|
||||
# Constants
|
||||
@@ -68,6 +71,13 @@ def restore_multiline(func):
|
||||
return wrapper
|
||||
|
||||
|
||||
class CommandCompletionException(Exception):
|
||||
"""Raised when a command should use the normal autocompleter instead of
|
||||
command-specific completion."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConfirmGroup:
|
||||
preference: str = None
|
||||
@@ -186,8 +196,12 @@ class AutoCompleter(Completer):
|
||||
return
|
||||
|
||||
if text[0] == "/":
|
||||
yield from self.get_command_completions(document, complete_event, text, words)
|
||||
return
|
||||
try:
|
||||
yield from self.get_command_completions(document, complete_event, text, words)
|
||||
return
|
||||
except CommandCompletionException:
|
||||
# Fall through to normal completion
|
||||
pass
|
||||
|
||||
candidates = self.words
|
||||
candidates.update(set(self.fname_to_rel_fnames))
|
||||
@@ -348,6 +362,35 @@ class InputOutput:
|
||||
self.file_watcher = file_watcher
|
||||
self.root = root
|
||||
|
||||
# Validate color settings after console is initialized
|
||||
self._validate_color_settings()
|
||||
|
||||
def _validate_color_settings(self):
|
||||
"""Validate configured color strings and reset invalid ones."""
|
||||
color_attributes = [
|
||||
"user_input_color",
|
||||
"tool_output_color",
|
||||
"tool_error_color",
|
||||
"tool_warning_color",
|
||||
"assistant_output_color",
|
||||
"completion_menu_color",
|
||||
"completion_menu_bg_color",
|
||||
"completion_menu_current_color",
|
||||
"completion_menu_current_bg_color",
|
||||
]
|
||||
for attr_name in color_attributes:
|
||||
color_value = getattr(self, attr_name, None)
|
||||
if color_value:
|
||||
try:
|
||||
# Try creating a style to validate the color
|
||||
RichStyle(color=color_value)
|
||||
except ColorParseError as e:
|
||||
self.console.print(
|
||||
"[bold red]Warning:[/bold red] Invalid configuration for"
|
||||
f" {attr_name}: '{color_value}'. {e}. Disabling this color."
|
||||
)
|
||||
setattr(self, attr_name, None) # Reset invalid color to None
|
||||
|
||||
def _get_style(self):
|
||||
style_dict = {}
|
||||
if not self.pretty:
|
||||
@@ -373,9 +416,9 @@ class InputOutput:
|
||||
# Conditionally add 'completion-menu.completion.current' style
|
||||
completion_menu_current_style = []
|
||||
if self.completion_menu_current_bg_color:
|
||||
completion_menu_current_style.append(f"bg:{self.completion_menu_current_bg_color}")
|
||||
completion_menu_current_style.append(self.completion_menu_current_bg_color)
|
||||
if self.completion_menu_current_color:
|
||||
completion_menu_current_style.append(self.completion_menu_current_color)
|
||||
completion_menu_current_style.append(f"bg:{self.completion_menu_current_color}")
|
||||
if completion_menu_current_style:
|
||||
style_dict["completion-menu.completion.current"] = " ".join(
|
||||
completion_menu_current_style
|
||||
@@ -492,11 +535,16 @@ class InputOutput:
|
||||
get_rel_fname(fname, root) for fname in (abs_read_only_fnames or [])
|
||||
]
|
||||
show = self.format_files_for_input(rel_fnames, rel_read_only_fnames)
|
||||
|
||||
prompt_prefix = ""
|
||||
if edit_format:
|
||||
show += edit_format
|
||||
prompt_prefix += edit_format
|
||||
if self.multiline_mode:
|
||||
show += (" " if edit_format else "") + "multi"
|
||||
show += "> "
|
||||
prompt_prefix += (" " if edit_format else "") + "multi"
|
||||
prompt_prefix += "> "
|
||||
|
||||
show += prompt_prefix
|
||||
self.prompt_prefix = prompt_prefix
|
||||
|
||||
inp = ""
|
||||
multiline_input = False
|
||||
@@ -540,11 +588,30 @@ class InputOutput:
|
||||
"Navigate forward through history"
|
||||
event.current_buffer.history_forward()
|
||||
|
||||
@kb.add("c-x", "c-e")
|
||||
def _(event):
|
||||
"Edit current input in external editor (like Bash)"
|
||||
buffer = event.current_buffer
|
||||
current_text = buffer.text
|
||||
|
||||
# Open the editor with the current text
|
||||
edited_text = pipe_editor(input_data=current_text, suffix="md")
|
||||
|
||||
# Replace the buffer with the edited text, strip any trailing newlines
|
||||
buffer.text = edited_text.rstrip("\n")
|
||||
|
||||
# Move cursor to the end of the text
|
||||
buffer.cursor_position = len(buffer.text)
|
||||
|
||||
@kb.add("enter", eager=True, filter=~is_searching)
|
||||
def _(event):
|
||||
"Handle Enter key press"
|
||||
if self.multiline_mode:
|
||||
# In multiline mode, Enter adds a newline
|
||||
if self.multiline_mode and not (
|
||||
self.editingmode == EditingMode.VI
|
||||
and event.app.vi_state.input_mode == InputMode.NAVIGATION
|
||||
):
|
||||
# In multiline mode and if not in vi-mode or vi navigation/normal mode,
|
||||
# Enter adds a newline
|
||||
event.current_buffer.insert_text("\n")
|
||||
else:
|
||||
# In normal mode, Enter submits
|
||||
@@ -562,7 +629,7 @@ class InputOutput:
|
||||
|
||||
while True:
|
||||
if multiline_input:
|
||||
show = ". "
|
||||
show = self.prompt_prefix
|
||||
|
||||
try:
|
||||
if self.prompt_session:
|
||||
@@ -578,7 +645,7 @@ class InputOutput:
|
||||
self.clipboard_watcher.start()
|
||||
|
||||
def get_continuation(width, line_number, is_soft_wrap):
|
||||
return ". "
|
||||
return self.prompt_prefix
|
||||
|
||||
line = self.prompt_session.prompt(
|
||||
show,
|
||||
@@ -896,6 +963,7 @@ class InputOutput:
|
||||
|
||||
if not isinstance(message, Text):
|
||||
message = Text(message)
|
||||
color = ensure_hash_prefix(color) if color else None
|
||||
style = dict(style=color) if self.pretty and color else dict()
|
||||
try:
|
||||
self.console.print(message, **style)
|
||||
@@ -926,7 +994,7 @@ class InputOutput:
|
||||
style = dict()
|
||||
if self.pretty:
|
||||
if self.tool_output_color:
|
||||
style["color"] = self.tool_output_color
|
||||
style["color"] = ensure_hash_prefix(self.tool_output_color)
|
||||
style["reverse"] = bold
|
||||
|
||||
style = RichStyle(**style)
|
||||
@@ -1076,18 +1144,19 @@ class InputOutput:
|
||||
ro_paths = []
|
||||
for rel_path in read_only_files:
|
||||
abs_path = os.path.abspath(os.path.join(self.root, rel_path))
|
||||
ro_paths.append(abs_path if len(abs_path) < len(rel_path) else rel_path)
|
||||
ro_paths.append(Text(abs_path if len(abs_path) < len(rel_path) else rel_path))
|
||||
|
||||
files_with_label = ["Readonly:"] + ro_paths
|
||||
files_with_label = [Text("Readonly:")] + ro_paths
|
||||
read_only_output = StringIO()
|
||||
Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
|
||||
read_only_lines = read_only_output.getvalue().splitlines()
|
||||
console.print(Columns(files_with_label))
|
||||
|
||||
if editable_files:
|
||||
files_with_label = editable_files
|
||||
text_editable_files = [Text(f) for f in editable_files]
|
||||
files_with_label = text_editable_files
|
||||
if read_only_files:
|
||||
files_with_label = ["Editable:"] + editable_files
|
||||
files_with_label = [Text("Editable:")] + text_editable_files
|
||||
editable_output = StringIO()
|
||||
Console(file=editable_output, force_terminal=False).print(Columns(files_with_label))
|
||||
editable_lines = editable_output.getvalue().splitlines()
|
||||
|
||||
@@ -7,6 +7,7 @@ import warnings
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import oslex
|
||||
from grep_ast import TreeContext, filename_to_lang
|
||||
from grep_ast.tsl import get_parser # noqa: E402
|
||||
|
||||
@@ -44,7 +45,7 @@ class Linter:
|
||||
return fname
|
||||
|
||||
def run_cmd(self, cmd, rel_fname, code):
|
||||
cmd += " " + rel_fname
|
||||
cmd += " " + oslex.quote(rel_fname)
|
||||
|
||||
returncode = 0
|
||||
stdout = ""
|
||||
|
||||
136
aider/main.py
136
aider/main.py
@@ -14,6 +14,7 @@ except ImportError:
|
||||
git = None
|
||||
|
||||
import importlib_resources
|
||||
import shtab
|
||||
from dotenv import load_dotenv
|
||||
from prompt_toolkit.enums import EditingMode
|
||||
|
||||
@@ -30,6 +31,7 @@ from aider.history import ChatSummary
|
||||
from aider.io import InputOutput
|
||||
from aider.llm import litellm # noqa: F401; properly init litellm on launch
|
||||
from aider.models import ModelSettings
|
||||
from aider.onboarding import offer_openrouter_oauth, select_default_model
|
||||
from aider.repo import ANY_GIT_ERROR, GitRepo
|
||||
from aider.report import report_uncaught_exceptions
|
||||
from aider.versioncheck import check_version, install_from_main_branch, install_upgrade
|
||||
@@ -357,11 +359,21 @@ def register_models(git_root, model_settings_fname, io, verbose=False):
|
||||
|
||||
|
||||
def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
|
||||
# Standard .env file search path
|
||||
dotenv_files = generate_search_path_list(
|
||||
".env",
|
||||
git_root,
|
||||
dotenv_fname,
|
||||
)
|
||||
|
||||
# Explicitly add the OAuth keys file to the beginning of the list
|
||||
oauth_keys_file = Path.home() / ".aider" / "oauth-keys.env"
|
||||
if oauth_keys_file.exists():
|
||||
# Insert at the beginning so it's loaded first (and potentially overridden)
|
||||
dotenv_files.insert(0, str(oauth_keys_file.resolve()))
|
||||
# Remove duplicates if it somehow got included by generate_search_path_list
|
||||
dotenv_files = list(dict.fromkeys(dotenv_files))
|
||||
|
||||
loaded = []
|
||||
for fname in dotenv_files:
|
||||
try:
|
||||
@@ -491,6 +503,12 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
# Parse again to include any arguments that might have been defined in .env
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
if args.shell_completions:
|
||||
# Ensure parser.prog is set for shtab, though it should be by default
|
||||
parser.prog = "aider"
|
||||
print(shtab.complete(parser, shell=args.shell_completions))
|
||||
sys.exit(0)
|
||||
|
||||
if git is None:
|
||||
args.git = False
|
||||
|
||||
@@ -714,11 +732,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
if args.check_update:
|
||||
check_version(io, verbose=args.verbose)
|
||||
|
||||
if args.list_models:
|
||||
models.print_matching_models(io, args.list_models)
|
||||
analytics.event("exit", reason="Listed models")
|
||||
return 0
|
||||
|
||||
if args.git:
|
||||
git_root = setup_git(git_root, io)
|
||||
if args.gitignore:
|
||||
@@ -738,6 +751,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
register_models(git_root, args.model_settings_file, io, verbose=args.verbose)
|
||||
register_litellm_models(git_root, args.model_metadata_file, io, verbose=args.verbose)
|
||||
|
||||
if args.list_models:
|
||||
models.print_matching_models(io, args.list_models)
|
||||
analytics.event("exit", reason="Listed models")
|
||||
return 0
|
||||
|
||||
# Process any command line aliases
|
||||
if args.alias:
|
||||
for alias_def in args.alias:
|
||||
@@ -751,26 +769,49 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
alias, model = parts
|
||||
models.MODEL_ALIASES[alias.strip()] = model.strip()
|
||||
|
||||
if not args.model:
|
||||
# Select model based on available API keys
|
||||
model_key_pairs = [
|
||||
("ANTHROPIC_API_KEY", "sonnet"),
|
||||
("DEEPSEEK_API_KEY", "deepseek"),
|
||||
("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.7-sonnet"),
|
||||
("OPENAI_API_KEY", "gpt-4o"),
|
||||
("GEMINI_API_KEY", "flash"),
|
||||
]
|
||||
selected_model_name = select_default_model(args, io, analytics)
|
||||
if not selected_model_name:
|
||||
# Error message and analytics event are handled within select_default_model
|
||||
# It might have already offered OAuth if no model/keys were found.
|
||||
# If it failed here, we exit.
|
||||
return 1
|
||||
args.model = selected_model_name # Update args with the selected model
|
||||
|
||||
for env_key, model_name in model_key_pairs:
|
||||
if os.environ.get(env_key):
|
||||
args.model = model_name
|
||||
io.tool_warning(
|
||||
f"Found {env_key} so using {model_name} since no --model was specified."
|
||||
# Check if an OpenRouter model was selected/specified but the key is missing
|
||||
if args.model.startswith("openrouter/") and not os.environ.get("OPENROUTER_API_KEY"):
|
||||
io.tool_warning(
|
||||
f"The specified model '{args.model}' requires an OpenRouter API key, which was not"
|
||||
" found."
|
||||
)
|
||||
# Attempt OAuth flow because the specific model needs it
|
||||
if offer_openrouter_oauth(io, analytics):
|
||||
# OAuth succeeded, the key should now be in os.environ.
|
||||
# Check if the key is now present after the flow.
|
||||
if os.environ.get("OPENROUTER_API_KEY"):
|
||||
io.tool_output(
|
||||
"OpenRouter successfully connected."
|
||||
) # Inform user connection worked
|
||||
else:
|
||||
# This case should ideally not happen if offer_openrouter_oauth succeeded
|
||||
# but check defensively.
|
||||
io.tool_error(
|
||||
"OpenRouter authentication seemed successful, but the key is still missing."
|
||||
)
|
||||
break
|
||||
if not args.model:
|
||||
io.tool_error("You need to specify a --model and an --api-key to use.")
|
||||
io.offer_url(urls.models_and_keys, "Open documentation url for more info?")
|
||||
analytics.event(
|
||||
"exit",
|
||||
reason="OpenRouter key missing after successful OAuth for specified model",
|
||||
)
|
||||
return 1
|
||||
else:
|
||||
# OAuth failed or was declined by the user
|
||||
io.tool_error(
|
||||
f"Unable to proceed without an OpenRouter API key for model '{args.model}'."
|
||||
)
|
||||
io.offer_url(urls.models_and_keys, "Open documentation URL for more info?")
|
||||
analytics.event(
|
||||
"exit",
|
||||
reason="OpenRouter key missing for specified model and OAuth failed/declined",
|
||||
)
|
||||
return 1
|
||||
|
||||
main_model = models.Model(
|
||||
@@ -787,16 +828,43 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
"Model setting 'remove_reasoning' is deprecated, please use 'reasoning_tag' instead."
|
||||
)
|
||||
|
||||
# Set reasoning effort if specified
|
||||
# Set reasoning effort and thinking tokens if specified
|
||||
if args.reasoning_effort is not None:
|
||||
main_model.set_reasoning_effort(args.reasoning_effort)
|
||||
# Apply if check is disabled or model explicitly supports it
|
||||
if not args.check_model_accepts_settings or (
|
||||
main_model.accepts_settings and "reasoning_effort" in main_model.accepts_settings
|
||||
):
|
||||
main_model.set_reasoning_effort(args.reasoning_effort)
|
||||
|
||||
# Set thinking tokens if specified
|
||||
if args.thinking_tokens is not None:
|
||||
main_model.set_thinking_tokens(args.thinking_tokens)
|
||||
# Apply if check is disabled or model explicitly supports it
|
||||
if not args.check_model_accepts_settings or (
|
||||
main_model.accepts_settings and "thinking_tokens" in main_model.accepts_settings
|
||||
):
|
||||
main_model.set_thinking_tokens(args.thinking_tokens)
|
||||
|
||||
# Show warnings about unsupported settings that are being ignored
|
||||
if args.check_model_accepts_settings:
|
||||
settings_to_check = [
|
||||
{"arg": args.reasoning_effort, "name": "reasoning_effort"},
|
||||
{"arg": args.thinking_tokens, "name": "thinking_tokens"},
|
||||
]
|
||||
|
||||
for setting in settings_to_check:
|
||||
if setting["arg"] is not None and (
|
||||
not main_model.accepts_settings
|
||||
or setting["name"] not in main_model.accepts_settings
|
||||
):
|
||||
io.tool_warning(
|
||||
f"Warning: {main_model.name} does not support '{setting['name']}', ignoring."
|
||||
)
|
||||
io.tool_output(
|
||||
f"Use --no-check-model-accepts-settings to force the '{setting['name']}'"
|
||||
" setting."
|
||||
)
|
||||
|
||||
if args.copy_paste and args.edit_format is None:
|
||||
if main_model.edit_format in ("diff", "whole"):
|
||||
if main_model.edit_format in ("diff", "whole", "diff-fenced"):
|
||||
main_model.edit_format = "editor-" + main_model.edit_format
|
||||
|
||||
if args.verbose:
|
||||
@@ -842,6 +910,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
attribute_commit_message_committer=args.attribute_commit_message_committer,
|
||||
commit_prompt=args.commit_prompt,
|
||||
subtree_only=args.subtree_only,
|
||||
git_commit_verify=args.git_commit_verify,
|
||||
attribute_co_authored_by=args.attribute_co_authored_by, # Pass the arg
|
||||
)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
@@ -890,6 +960,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
else:
|
||||
map_tokens = args.map_tokens
|
||||
|
||||
# Track auto-commits configuration
|
||||
analytics.event("auto_commits", enabled=bool(args.auto_commits))
|
||||
|
||||
try:
|
||||
coder = Coder.create(
|
||||
main_model=main_model,
|
||||
@@ -1036,6 +1109,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
io.tool_output(f"Cur working dir: {Path.cwd()}")
|
||||
io.tool_output(f"Git working dir: {git_root}")
|
||||
|
||||
if args.stream and args.cache_prompts:
|
||||
io.tool_warning("Cost estimates may be inaccurate when using streaming and caching.")
|
||||
|
||||
if args.load:
|
||||
commands.cmd_load(args.load)
|
||||
|
||||
@@ -1081,6 +1157,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
except SwitchCoder as switch:
|
||||
coder.ok_to_warm_cache = False
|
||||
|
||||
# Set the placeholder if provided
|
||||
if hasattr(switch, "placeholder") and switch.placeholder is not None:
|
||||
io.placeholder = switch.placeholder
|
||||
|
||||
kwargs = dict(io=io, from_coder=coder)
|
||||
kwargs.update(switch.kwargs)
|
||||
if "show_announcements" in kwargs:
|
||||
|
||||
@@ -3,9 +3,12 @@
|
||||
import io
|
||||
import time
|
||||
|
||||
from rich import box
|
||||
from rich.console import Console
|
||||
from rich.live import Live
|
||||
from rich.markdown import Markdown
|
||||
from rich.markdown import CodeBlock, Heading, Markdown
|
||||
from rich.panel import Panel
|
||||
from rich.syntax import Syntax
|
||||
from rich.text import Text
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
@@ -46,6 +49,46 @@ The end.
|
||||
""" # noqa: E501
|
||||
|
||||
|
||||
class NoInsetCodeBlock(CodeBlock):
|
||||
"""A code block with syntax highlighting and no padding."""
|
||||
|
||||
def __rich_console__(self, console, options):
|
||||
code = str(self.text).rstrip()
|
||||
syntax = Syntax(code, self.lexer_name, theme=self.theme, word_wrap=True, padding=(1, 0))
|
||||
yield syntax
|
||||
|
||||
|
||||
class LeftHeading(Heading):
|
||||
"""A heading class that renders left-justified."""
|
||||
|
||||
def __rich_console__(self, console, options):
|
||||
text = self.text
|
||||
text.justify = "left" # Override justification
|
||||
if self.tag == "h1":
|
||||
# Draw a border around h1s, but keep text left-aligned
|
||||
yield Panel(
|
||||
text,
|
||||
box=box.HEAVY,
|
||||
style="markdown.h1.border",
|
||||
)
|
||||
else:
|
||||
# Styled text for h2 and beyond
|
||||
if self.tag == "h2":
|
||||
yield Text("") # Keep the blank line before h2
|
||||
yield text
|
||||
|
||||
|
||||
class NoInsetMarkdown(Markdown):
|
||||
"""Markdown with code blocks that have no padding and left-justified headings."""
|
||||
|
||||
elements = {
|
||||
**Markdown.elements,
|
||||
"fence": NoInsetCodeBlock,
|
||||
"code_block": NoInsetCodeBlock,
|
||||
"heading_open": LeftHeading,
|
||||
}
|
||||
|
||||
|
||||
class MarkdownStream:
|
||||
"""Streaming markdown renderer that progressively displays content with a live updating window.
|
||||
|
||||
@@ -72,9 +115,9 @@ class MarkdownStream:
|
||||
else:
|
||||
self.mdargs = dict()
|
||||
|
||||
# Initialize rich Live display with empty text
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
# Defer Live creation until the first update.
|
||||
self.live = None
|
||||
self._live_started = False
|
||||
|
||||
def _render_markdown_to_lines(self, text):
|
||||
"""Render markdown text to a list of lines.
|
||||
@@ -88,7 +131,7 @@ class MarkdownStream:
|
||||
# Render the markdown to a string buffer
|
||||
string_io = io.StringIO()
|
||||
console = Console(file=string_io, force_terminal=True)
|
||||
markdown = Markdown(text, **self.mdargs)
|
||||
markdown = NoInsetMarkdown(text, **self.mdargs)
|
||||
console.print(markdown)
|
||||
output = string_io.getvalue()
|
||||
|
||||
@@ -120,6 +163,12 @@ class MarkdownStream:
|
||||
Markdown going to the console works better in terminal scrollback buffers.
|
||||
The live window doesn't play nice with terminal scrollback.
|
||||
"""
|
||||
# On the first call, stop the spinner and start the Live renderer
|
||||
if not getattr(self, "_live_started", False):
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
self._live_started = True
|
||||
|
||||
now = time.time()
|
||||
# Throttle updates to maintain smooth rendering
|
||||
if not final and now - self.when < self.min_delay:
|
||||
@@ -186,6 +235,7 @@ if __name__ == "__main__":
|
||||
_text = _text * 10
|
||||
|
||||
pm = MarkdownStream()
|
||||
print("Using NoInsetMarkdown for code blocks with padding=0")
|
||||
for i in range(6, len(_text), 5):
|
||||
pm.update(_text[:i])
|
||||
time.sleep(0.01)
|
||||
|
||||
240
aider/models.py
240
aider/models.py
@@ -17,6 +17,7 @@ from PIL import Image
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.llm import litellm
|
||||
from aider.openrouter import OpenRouterModelManager
|
||||
from aider.sendchat import ensure_alternating_roles, sanity_check_messages
|
||||
from aider.utils import check_pip_install_extra
|
||||
|
||||
@@ -88,8 +89,14 @@ MODEL_ALIASES = {
|
||||
"3": "gpt-3.5-turbo",
|
||||
# Other models
|
||||
"deepseek": "deepseek/deepseek-chat",
|
||||
"flash": "gemini/gemini-2.5-flash-preview-04-17",
|
||||
"quasar": "openrouter/openrouter/quasar-alpha",
|
||||
"r1": "deepseek/deepseek-reasoner",
|
||||
"flash": "gemini/gemini-2.0-flash-exp",
|
||||
"gemini-2.5-pro": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
|
||||
"grok3": "xai/grok-3-beta",
|
||||
"optimus": "openrouter/openrouter/optimus-alpha",
|
||||
}
|
||||
# Model metadata loaded from resources and user's files.
|
||||
|
||||
@@ -103,6 +110,7 @@ class ModelSettings:
|
||||
use_repo_map: bool = False
|
||||
send_undo_reply: bool = False
|
||||
lazy: bool = False
|
||||
overeager: bool = False
|
||||
reminder: str = "user"
|
||||
examples_as_sys_msg: bool = False
|
||||
extra_params: Optional[dict] = None
|
||||
@@ -116,6 +124,7 @@ class ModelSettings:
|
||||
reasoning_tag: Optional[str] = None
|
||||
remove_reasoning: Optional[str] = None # Deprecated alias for reasoning_tag
|
||||
system_prompt_prefix: Optional[str] = None
|
||||
accepts_settings: Optional[list] = None
|
||||
|
||||
|
||||
# Load model settings from package resource
|
||||
@@ -141,8 +150,13 @@ class ModelInfoManager:
|
||||
self.verify_ssl = True
|
||||
self._cache_loaded = False
|
||||
|
||||
# Manager for the cached OpenRouter model database
|
||||
self.openrouter_manager = OpenRouterModelManager()
|
||||
|
||||
def set_verify_ssl(self, verify_ssl):
|
||||
self.verify_ssl = verify_ssl
|
||||
if hasattr(self, "openrouter_manager"):
|
||||
self.openrouter_manager.set_verify_ssl(verify_ssl)
|
||||
|
||||
def _load_cache(self):
|
||||
if self._cache_loaded:
|
||||
@@ -223,8 +237,68 @@ class ModelInfoManager:
|
||||
if litellm_info:
|
||||
return litellm_info
|
||||
|
||||
if not cached_info and model.startswith("openrouter/"):
|
||||
# First try using the locally cached OpenRouter model database
|
||||
openrouter_info = self.openrouter_manager.get_model_info(model)
|
||||
if openrouter_info:
|
||||
return openrouter_info
|
||||
|
||||
# Fallback to legacy web-scraping if the API cache does not contain the model
|
||||
openrouter_info = self.fetch_openrouter_model_info(model)
|
||||
if openrouter_info:
|
||||
return openrouter_info
|
||||
|
||||
return cached_info
|
||||
|
||||
def fetch_openrouter_model_info(self, model):
|
||||
"""
|
||||
Fetch model info by scraping the openrouter model page.
|
||||
Expected URL: https://openrouter.ai/<model_route>
|
||||
Example: openrouter/qwen/qwen-2.5-72b-instruct:free
|
||||
Returns a dict with keys: max_tokens, max_input_tokens, max_output_tokens,
|
||||
input_cost_per_token, output_cost_per_token.
|
||||
"""
|
||||
url_part = model[len("openrouter/") :]
|
||||
url = "https://openrouter.ai/" + url_part
|
||||
try:
|
||||
import requests
|
||||
|
||||
response = requests.get(url, timeout=5, verify=self.verify_ssl)
|
||||
if response.status_code != 200:
|
||||
return {}
|
||||
html = response.text
|
||||
import re
|
||||
|
||||
if re.search(
|
||||
rf"The model\s*.*{re.escape(url_part)}.* is not available", html, re.IGNORECASE
|
||||
):
|
||||
print(f"\033[91mError: Model '{url_part}' is not available\033[0m")
|
||||
return {}
|
||||
text = re.sub(r"<[^>]+>", " ", html)
|
||||
context_match = re.search(r"([\d,]+)\s*context", text)
|
||||
if context_match:
|
||||
context_str = context_match.group(1).replace(",", "")
|
||||
context_size = int(context_str)
|
||||
else:
|
||||
context_size = None
|
||||
input_cost_match = re.search(r"\$\s*([\d.]+)\s*/M input tokens", text, re.IGNORECASE)
|
||||
output_cost_match = re.search(r"\$\s*([\d.]+)\s*/M output tokens", text, re.IGNORECASE)
|
||||
input_cost = float(input_cost_match.group(1)) / 1000000 if input_cost_match else None
|
||||
output_cost = float(output_cost_match.group(1)) / 1000000 if output_cost_match else None
|
||||
if context_size is None or input_cost is None or output_cost is None:
|
||||
return {}
|
||||
params = {
|
||||
"max_input_tokens": context_size,
|
||||
"max_tokens": context_size,
|
||||
"max_output_tokens": context_size,
|
||||
"input_cost_per_token": input_cost,
|
||||
"output_cost_per_token": output_cost,
|
||||
}
|
||||
return params
|
||||
except Exception as e:
|
||||
print("Error fetching openrouter info:", str(e))
|
||||
return {}
|
||||
|
||||
|
||||
model_info_manager = ModelInfoManager()
|
||||
|
||||
@@ -295,6 +369,10 @@ class Model(ModelSettings):
|
||||
exact_match = True
|
||||
break # Continue to apply overrides
|
||||
|
||||
# Initialize accepts_settings if it's None
|
||||
if self.accepts_settings is None:
|
||||
self.accepts_settings = []
|
||||
|
||||
model = model.lower()
|
||||
|
||||
# If no exact match, try generic settings
|
||||
@@ -302,7 +380,11 @@ class Model(ModelSettings):
|
||||
self.apply_generic_model_settings(model)
|
||||
|
||||
# Apply override settings last if they exist
|
||||
if self.extra_model_settings and self.extra_model_settings.extra_params:
|
||||
if (
|
||||
self.extra_model_settings
|
||||
and self.extra_model_settings.extra_params
|
||||
and self.extra_model_settings.name == "aider/extra_params"
|
||||
):
|
||||
# Initialize extra_params if it doesn't exist
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
@@ -316,12 +398,38 @@ class Model(ModelSettings):
|
||||
# For non-dict values, simply update
|
||||
self.extra_params[key] = value
|
||||
|
||||
# Ensure OpenRouter models accept thinking_tokens and reasoning_effort
|
||||
if self.name.startswith("openrouter/"):
|
||||
if self.accepts_settings is None:
|
||||
self.accepts_settings = []
|
||||
if "thinking_tokens" not in self.accepts_settings:
|
||||
self.accepts_settings.append("thinking_tokens")
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
|
||||
def apply_generic_model_settings(self, model):
|
||||
if "/o3-mini" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.use_temperature = False
|
||||
self.system_prompt_prefix = "Formatting re-enabled. "
|
||||
self.system_prompt_prefix = "Formatting re-enabled. "
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
return # <--
|
||||
|
||||
if "gpt-4.1-mini" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.reminder = "sys"
|
||||
self.examples_as_sys_msg = False
|
||||
return # <--
|
||||
|
||||
if "gpt-4.1" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.reminder = "sys"
|
||||
self.examples_as_sys_msg = False
|
||||
return # <--
|
||||
|
||||
if "/o1-mini" in model:
|
||||
@@ -343,6 +451,8 @@ class Model(ModelSettings):
|
||||
self.use_temperature = False
|
||||
self.streaming = False
|
||||
self.system_prompt_prefix = "Formatting re-enabled. "
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
return # <--
|
||||
|
||||
if "deepseek" in model and "v3" in model:
|
||||
@@ -358,7 +468,6 @@ class Model(ModelSettings):
|
||||
self.examples_as_sys_msg = True
|
||||
self.use_temperature = False
|
||||
self.reasoning_tag = "think"
|
||||
self.reasoning_tag = "think"
|
||||
return # <--
|
||||
|
||||
if ("llama3" in model or "llama-3" in model) and "70b" in model:
|
||||
@@ -384,6 +493,15 @@ class Model(ModelSettings):
|
||||
self.reminder = "sys"
|
||||
return # <--
|
||||
|
||||
if "3-7-sonnet" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.examples_as_sys_msg = True
|
||||
self.reminder = "user"
|
||||
if "thinking_tokens" not in self.accepts_settings:
|
||||
self.accepts_settings.append("thinking_tokens")
|
||||
return # <--
|
||||
|
||||
if "3.5-sonnet" in model or "3-5-sonnet" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
@@ -417,6 +535,14 @@ class Model(ModelSettings):
|
||||
self.extra_params = dict(top_p=0.95)
|
||||
return # <--
|
||||
|
||||
if "qwen3" in model and "235b" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.system_prompt_prefix = "/no_think"
|
||||
self.use_temperature = 0.7
|
||||
self.extra_params = {"top_p": 0.8, "top_k": 20, "min_p": 0.0}
|
||||
return # <--
|
||||
|
||||
# use the defaults
|
||||
if self.edit_format == "diff":
|
||||
self.use_repo_map = True
|
||||
@@ -464,6 +590,8 @@ class Model(ModelSettings):
|
||||
|
||||
if not self.editor_edit_format:
|
||||
self.editor_edit_format = self.editor_model.edit_format
|
||||
if self.editor_edit_format in ("diff", "whole", "diff-fenced"):
|
||||
self.editor_edit_format = "editor-" + self.editor_edit_format
|
||||
|
||||
return self.editor_model
|
||||
|
||||
@@ -572,6 +700,21 @@ class Model(ModelSettings):
|
||||
|
||||
model = self.name
|
||||
res = litellm.validate_environment(model)
|
||||
|
||||
# If missing AWS credential keys but AWS_PROFILE is set, consider AWS credentials valid
|
||||
if res["missing_keys"] and any(
|
||||
key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] for key in res["missing_keys"]
|
||||
):
|
||||
if model.startswith("bedrock/") or model.startswith("us.anthropic."):
|
||||
if os.environ.get("AWS_PROFILE"):
|
||||
res["missing_keys"] = [
|
||||
k
|
||||
for k in res["missing_keys"]
|
||||
if k not in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
||||
]
|
||||
if not res["missing_keys"]:
|
||||
res["keys_in_environment"] = True
|
||||
|
||||
if res["keys_in_environment"]:
|
||||
return res
|
||||
if res["missing_keys"]:
|
||||
@@ -599,11 +742,18 @@ class Model(ModelSettings):
|
||||
def set_reasoning_effort(self, effort):
|
||||
"""Set the reasoning effort parameter for models that support it"""
|
||||
if effort is not None:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning_effort"] = effort
|
||||
if self.name.startswith("openrouter/"):
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"effort": effort}
|
||||
else:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning_effort"] = effort
|
||||
|
||||
def parse_token_value(self, value):
|
||||
"""
|
||||
@@ -646,16 +796,40 @@ class Model(ModelSettings):
|
||||
self.use_temperature = False
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
|
||||
|
||||
def get_thinking_tokens(self, model):
|
||||
# OpenRouter models use 'reasoning' instead of 'thinking'
|
||||
if self.name.startswith("openrouter/"):
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"max_tokens": num_tokens}
|
||||
else:
|
||||
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
|
||||
|
||||
def get_raw_thinking_tokens(self):
|
||||
"""Get formatted thinking token budget if available"""
|
||||
if (
|
||||
model.extra_params
|
||||
and "thinking" in model.extra_params
|
||||
and "budget_tokens" in model.extra_params["thinking"]
|
||||
):
|
||||
budget = model.extra_params["thinking"]["budget_tokens"]
|
||||
budget = None
|
||||
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "max_tokens" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
budget = self.extra_params["extra_body"]["reasoning"]["max_tokens"]
|
||||
# Check for standard thinking format
|
||||
elif (
|
||||
"thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
|
||||
):
|
||||
budget = self.extra_params["thinking"]["budget_tokens"]
|
||||
|
||||
return budget
|
||||
|
||||
def get_thinking_tokens(self):
|
||||
budget = self.get_raw_thinking_tokens()
|
||||
|
||||
if budget is not None:
|
||||
# Format as xx.yK for thousands, xx.yM for millions
|
||||
if budget >= 1024 * 1024:
|
||||
value = budget / (1024 * 1024)
|
||||
@@ -671,14 +845,23 @@ class Model(ModelSettings):
|
||||
return f"{value:.1f}k"
|
||||
return None
|
||||
|
||||
def get_reasoning_effort(self, model):
|
||||
def get_reasoning_effort(self):
|
||||
"""Get reasoning effort value if available"""
|
||||
if (
|
||||
model.extra_params
|
||||
and "extra_body" in model.extra_params
|
||||
and "reasoning_effort" in model.extra_params["extra_body"]
|
||||
):
|
||||
return model.extra_params["extra_body"]["reasoning_effort"]
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "effort" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning"]["effort"]
|
||||
# Check for standard reasoning_effort format (e.g. in extra_body)
|
||||
elif (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning_effort" in self.extra_params["extra_body"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning_effort"]
|
||||
return None
|
||||
|
||||
def is_deepseek_r1(self):
|
||||
@@ -699,7 +882,6 @@ class Model(ModelSettings):
|
||||
|
||||
kwargs = dict(
|
||||
model=self.name,
|
||||
messages=messages,
|
||||
stream=stream,
|
||||
)
|
||||
|
||||
@@ -730,6 +912,8 @@ class Model(ModelSettings):
|
||||
kwargs["timeout"] = request_timeout
|
||||
if self.verbose:
|
||||
dump(kwargs)
|
||||
kwargs["messages"] = messages
|
||||
|
||||
res = litellm.completion(**kwargs)
|
||||
return hash_object, res
|
||||
|
||||
@@ -741,6 +925,9 @@ class Model(ModelSettings):
|
||||
messages = ensure_alternating_roles(messages)
|
||||
retry_delay = 0.125
|
||||
|
||||
if self.verbose:
|
||||
dump(messages)
|
||||
|
||||
while True:
|
||||
try:
|
||||
kwargs = {
|
||||
@@ -924,7 +1111,10 @@ def fuzzy_match_models(name):
|
||||
name = name.lower()
|
||||
|
||||
chat_models = set()
|
||||
for orig_model, attrs in litellm.model_cost.items():
|
||||
model_metadata = list(litellm.model_cost.items())
|
||||
model_metadata += list(model_info_manager.local_model_metadata.items())
|
||||
|
||||
for orig_model, attrs in model_metadata:
|
||||
model = orig_model.lower()
|
||||
if attrs.get("mode") != "chat":
|
||||
continue
|
||||
|
||||
428
aider/onboarding.py
Normal file
428
aider/onboarding.py
Normal file
@@ -0,0 +1,428 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import http.server
|
||||
import os
|
||||
import secrets
|
||||
import socketserver
|
||||
import threading
|
||||
import time
|
||||
import webbrowser
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
import requests
|
||||
|
||||
from aider import urls
|
||||
from aider.io import InputOutput
|
||||
|
||||
|
||||
def check_openrouter_tier(api_key):
|
||||
"""
|
||||
Checks if the user is on a free tier for OpenRouter.
|
||||
|
||||
Args:
|
||||
api_key: The OpenRouter API key to check.
|
||||
|
||||
Returns:
|
||||
A boolean indicating if the user is on a free tier (True) or paid tier (False).
|
||||
Returns True if the check fails.
|
||||
"""
|
||||
try:
|
||||
response = requests.get(
|
||||
"https://openrouter.ai/api/v1/auth/key",
|
||||
headers={"Authorization": f"Bearer {api_key}"},
|
||||
timeout=5, # Add a reasonable timeout
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
# According to the documentation, 'is_free_tier' will be true if the user has never paid
|
||||
return data.get("data", {}).get("is_free_tier", True) # Default to True if not found
|
||||
except Exception:
|
||||
# If there's any error, we'll default to assuming free tier
|
||||
return True
|
||||
|
||||
|
||||
def try_to_select_default_model():
|
||||
"""
|
||||
Attempts to select a default model based on available API keys.
|
||||
Checks OpenRouter tier status to select appropriate model.
|
||||
|
||||
Returns:
|
||||
The name of the selected model, or None if no suitable default is found.
|
||||
"""
|
||||
# Special handling for OpenRouter
|
||||
openrouter_key = os.environ.get("OPENROUTER_API_KEY")
|
||||
if openrouter_key:
|
||||
# Check if the user is on a free tier
|
||||
is_free_tier = check_openrouter_tier(openrouter_key)
|
||||
if is_free_tier:
|
||||
return "openrouter/google/gemini-2.5-pro-exp-03-25:free"
|
||||
else:
|
||||
return "openrouter/anthropic/claude-3.7-sonnet"
|
||||
|
||||
# Select model based on other available API keys
|
||||
model_key_pairs = [
|
||||
("ANTHROPIC_API_KEY", "sonnet"),
|
||||
("DEEPSEEK_API_KEY", "deepseek"),
|
||||
("OPENAI_API_KEY", "gpt-4o"),
|
||||
("GEMINI_API_KEY", "gemini/gemini-2.5-pro-exp-03-25"),
|
||||
("VERTEXAI_PROJECT", "vertex_ai/gemini-2.5-pro-exp-03-25"),
|
||||
]
|
||||
|
||||
for env_key, model_name in model_key_pairs:
|
||||
api_key_value = os.environ.get(env_key)
|
||||
if api_key_value:
|
||||
return model_name
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def offer_openrouter_oauth(io, analytics):
|
||||
"""
|
||||
Offers OpenRouter OAuth flow to the user if no API keys are found.
|
||||
|
||||
Args:
|
||||
io: The InputOutput object for user interaction.
|
||||
analytics: The Analytics object for tracking events.
|
||||
|
||||
Returns:
|
||||
True if authentication was successful, False otherwise.
|
||||
"""
|
||||
# No API keys found - Offer OpenRouter OAuth
|
||||
io.tool_output("OpenRouter provides free and paid access to many LLMs.")
|
||||
# Use confirm_ask which handles non-interactive cases
|
||||
if io.confirm_ask(
|
||||
"Login to OpenRouter or create a free account?",
|
||||
default="y",
|
||||
):
|
||||
analytics.event("oauth_flow_initiated", provider="openrouter")
|
||||
openrouter_key = start_openrouter_oauth_flow(io, analytics)
|
||||
if openrouter_key:
|
||||
# Successfully got key via OAuth, use the default OpenRouter model
|
||||
# Ensure OPENROUTER_API_KEY is now set in the environment for later use
|
||||
os.environ["OPENROUTER_API_KEY"] = openrouter_key
|
||||
# Track OAuth success leading to model selection
|
||||
analytics.event("oauth_flow_success")
|
||||
return True
|
||||
|
||||
# OAuth failed or was cancelled by user implicitly (e.g., closing browser)
|
||||
# Error messages are handled within start_openrouter_oauth_flow
|
||||
analytics.event("oauth_flow_failure")
|
||||
io.tool_error("OpenRouter authentication did not complete successfully.")
|
||||
# Fall through to the final error message
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def select_default_model(args, io, analytics):
|
||||
"""
|
||||
Selects a default model based on available API keys if no model is specified.
|
||||
Offers OAuth flow for OpenRouter if no keys are found.
|
||||
|
||||
Args:
|
||||
args: The command line arguments object.
|
||||
io: The InputOutput object for user interaction.
|
||||
analytics: The Analytics object for tracking events.
|
||||
|
||||
Returns:
|
||||
The name of the selected model, or None if no suitable default is found.
|
||||
"""
|
||||
if args.model:
|
||||
return args.model # Model already specified
|
||||
|
||||
model = try_to_select_default_model()
|
||||
if model:
|
||||
io.tool_warning(f"Using {model} model with API key from environment.")
|
||||
analytics.event("auto_model_selection", model=model)
|
||||
return model
|
||||
|
||||
no_model_msg = "No LLM model was specified and no API keys were provided."
|
||||
io.tool_warning(no_model_msg)
|
||||
|
||||
# Try OAuth if no model was detected
|
||||
offer_openrouter_oauth(io, analytics)
|
||||
|
||||
# Check again after potential OAuth success
|
||||
model = try_to_select_default_model()
|
||||
if model:
|
||||
return model
|
||||
|
||||
io.offer_url(urls.models_and_keys, "Open documentation URL for more info?")
|
||||
|
||||
|
||||
# Helper function to find an available port
|
||||
def find_available_port(start_port=8484, end_port=8584):
|
||||
for port in range(start_port, end_port + 1):
|
||||
try:
|
||||
# Check if the port is available by trying to bind to it
|
||||
with socketserver.TCPServer(("localhost", port), None):
|
||||
return port
|
||||
except OSError:
|
||||
# Port is likely already in use
|
||||
continue
|
||||
return None
|
||||
|
||||
|
||||
# PKCE code generation
|
||||
def generate_pkce_codes():
|
||||
code_verifier = secrets.token_urlsafe(64)
|
||||
hasher = hashlib.sha256()
|
||||
hasher.update(code_verifier.encode("utf-8"))
|
||||
code_challenge = base64.urlsafe_b64encode(hasher.digest()).rstrip(b"=").decode("utf-8")
|
||||
return code_verifier, code_challenge
|
||||
|
||||
|
||||
# Function to exchange the authorization code for an API key
|
||||
def exchange_code_for_key(code, code_verifier, io):
|
||||
try:
|
||||
response = requests.post(
|
||||
"https://openrouter.ai/api/v1/auth/keys",
|
||||
headers={"Content-Type": "application/json"},
|
||||
json={
|
||||
"code": code,
|
||||
"code_verifier": code_verifier,
|
||||
"code_challenge_method": "S256",
|
||||
},
|
||||
timeout=30, # Add a timeout
|
||||
)
|
||||
response.raise_for_status() # Raise exception for bad status codes (4xx or 5xx)
|
||||
data = response.json()
|
||||
api_key = data.get("key")
|
||||
if not api_key:
|
||||
io.tool_error("Error: 'key' not found in OpenRouter response.")
|
||||
io.tool_error(f"Response: {response.text}")
|
||||
return None
|
||||
return api_key
|
||||
except requests.exceptions.Timeout:
|
||||
io.tool_error("Error: Request to OpenRouter timed out during code exchange.")
|
||||
return None
|
||||
except requests.exceptions.HTTPError as e:
|
||||
io.tool_error(
|
||||
"Error exchanging code for OpenRouter key:"
|
||||
f" {e.response.status_code} {e.response.reason}"
|
||||
)
|
||||
io.tool_error(f"Response: {e.response.text}")
|
||||
return None
|
||||
except requests.exceptions.RequestException as e:
|
||||
io.tool_error(f"Error exchanging code for OpenRouter key: {e}")
|
||||
return None
|
||||
except Exception as e:
|
||||
io.tool_error(f"Unexpected error during code exchange: {e}")
|
||||
return None
|
||||
|
||||
|
||||
# Function to start the OAuth flow
|
||||
def start_openrouter_oauth_flow(io, analytics):
|
||||
"""Initiates the OpenRouter OAuth PKCE flow using a local server."""
|
||||
|
||||
port = find_available_port()
|
||||
if not port:
|
||||
io.tool_error("Could not find an available port between 8484 and 8584.")
|
||||
io.tool_error("Please ensure a port in this range is free, or configure manually.")
|
||||
return None
|
||||
|
||||
callback_url = f"http://localhost:{port}/callback/aider"
|
||||
auth_code = None
|
||||
server_error = None
|
||||
server_started = threading.Event()
|
||||
shutdown_server = threading.Event()
|
||||
|
||||
class OAuthCallbackHandler(http.server.SimpleHTTPRequestHandler):
|
||||
def do_GET(self):
|
||||
nonlocal auth_code, server_error
|
||||
parsed_path = urlparse(self.path)
|
||||
if parsed_path.path == "/callback/aider":
|
||||
query_params = parse_qs(parsed_path.query)
|
||||
if "code" in query_params:
|
||||
auth_code = query_params["code"][0]
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "text/html")
|
||||
self.end_headers()
|
||||
self.wfile.write(
|
||||
b"<html><body><h1>Success!</h1>"
|
||||
b"<p>Aider has received the authentication code. "
|
||||
b"You can close this browser tab.</p></body></html>"
|
||||
)
|
||||
# Signal the main thread to shut down the server
|
||||
# Signal the main thread to shut down the server
|
||||
shutdown_server.set()
|
||||
else:
|
||||
# Redirect to aider website if 'code' is missing (e.g., user visited manually)
|
||||
self.send_response(302) # Found (temporary redirect)
|
||||
self.send_header("Location", urls.website)
|
||||
self.end_headers()
|
||||
# No need to set server_error, just redirect.
|
||||
# Do NOT shut down the server here; wait for timeout or success.
|
||||
else:
|
||||
# Redirect anything else (e.g., favicon.ico) to the main website as well
|
||||
self.send_response(302)
|
||||
self.send_header("Location", urls.website)
|
||||
self.end_headers()
|
||||
self.wfile.write(b"Not Found")
|
||||
|
||||
def log_message(self, format, *args):
|
||||
# Suppress server logging to keep terminal clean
|
||||
pass
|
||||
|
||||
def run_server():
|
||||
nonlocal server_error
|
||||
try:
|
||||
with socketserver.TCPServer(("localhost", port), OAuthCallbackHandler) as httpd:
|
||||
io.tool_output(f"Temporary server listening on {callback_url}", log_only=True)
|
||||
server_started.set() # Signal that the server is ready
|
||||
# Wait until shutdown is requested or timeout occurs (handled by main thread)
|
||||
while not shutdown_server.is_set():
|
||||
httpd.handle_request() # Handle one request at a time
|
||||
# Add a small sleep to prevent busy-waiting if needed,
|
||||
# though handle_request should block appropriately.
|
||||
time.sleep(0.1)
|
||||
io.tool_output("Shutting down temporary server.", log_only=True)
|
||||
except Exception as e:
|
||||
server_error = f"Failed to start or run temporary server: {e}"
|
||||
server_started.set() # Signal even if failed, error will be checked
|
||||
shutdown_server.set() # Ensure shutdown logic proceeds
|
||||
|
||||
server_thread = threading.Thread(target=run_server, daemon=True)
|
||||
server_thread.start()
|
||||
|
||||
# Wait briefly for the server to start, or for an error
|
||||
if not server_started.wait(timeout=5):
|
||||
io.tool_error("Temporary authentication server failed to start in time.")
|
||||
shutdown_server.set() # Ensure thread exits if it eventually starts
|
||||
server_thread.join(timeout=1)
|
||||
return None
|
||||
|
||||
# Check if server failed during startup
|
||||
if server_error:
|
||||
io.tool_error(server_error)
|
||||
shutdown_server.set() # Ensure thread exits
|
||||
server_thread.join(timeout=1)
|
||||
return None
|
||||
|
||||
# Generate codes and URL
|
||||
code_verifier, code_challenge = generate_pkce_codes()
|
||||
auth_url_base = "https://openrouter.ai/auth"
|
||||
auth_params = {
|
||||
"callback_url": callback_url,
|
||||
"code_challenge": code_challenge,
|
||||
"code_challenge_method": "S256",
|
||||
}
|
||||
auth_url = f"{auth_url_base}?{'&'.join(f'{k}={v}' for k, v in auth_params.items())}"
|
||||
|
||||
io.tool_output("\nPlease open this URL in your browser to connect Aider with OpenRouter:")
|
||||
io.tool_output()
|
||||
print(auth_url)
|
||||
|
||||
MINUTES = 5
|
||||
io.tool_output(f"\nWaiting up to {MINUTES} minutes for you to finish in the browser...")
|
||||
io.tool_output("Use Control-C to interrupt.")
|
||||
|
||||
try:
|
||||
webbrowser.open(auth_url)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Wait for the callback to set the auth_code or for timeout/error
|
||||
interrupted = False
|
||||
try:
|
||||
shutdown_server.wait(timeout=MINUTES * 60) # Convert minutes to seconds
|
||||
except KeyboardInterrupt:
|
||||
io.tool_warning("\nOAuth flow interrupted.")
|
||||
analytics.event("oauth_flow_failed", provider="openrouter", reason="user_interrupt")
|
||||
interrupted = True
|
||||
# Ensure the server thread is signaled to shut down
|
||||
shutdown_server.set()
|
||||
|
||||
# Join the server thread to ensure it's cleaned up
|
||||
server_thread.join(timeout=1)
|
||||
|
||||
if interrupted:
|
||||
return None # Return None if interrupted by user
|
||||
|
||||
if server_error:
|
||||
io.tool_error(f"Authentication failed: {server_error}")
|
||||
analytics.event("oauth_flow_failed", provider="openrouter", reason=server_error)
|
||||
return None
|
||||
|
||||
if not auth_code:
|
||||
io.tool_error("Authentication with OpenRouter failed.")
|
||||
analytics.event("oauth_flow_failed", provider="openrouter")
|
||||
return None
|
||||
|
||||
io.tool_output("Completing authentication...")
|
||||
analytics.event("oauth_flow_code_received", provider="openrouter")
|
||||
|
||||
# Exchange code for key
|
||||
api_key = exchange_code_for_key(auth_code, code_verifier, io)
|
||||
|
||||
if api_key:
|
||||
# Set env var for the current session immediately
|
||||
os.environ["OPENROUTER_API_KEY"] = api_key
|
||||
|
||||
# Save the key to the oauth-keys.env file
|
||||
try:
|
||||
config_dir = os.path.expanduser("~/.aider")
|
||||
os.makedirs(config_dir, exist_ok=True)
|
||||
key_file = os.path.join(config_dir, "oauth-keys.env")
|
||||
with open(key_file, "a", encoding="utf-8") as f:
|
||||
f.write(f'OPENROUTER_API_KEY="{api_key}"\n')
|
||||
|
||||
io.tool_warning("Aider will load the OpenRouter key automatically in future sessions.")
|
||||
io.tool_output()
|
||||
|
||||
analytics.event("oauth_flow_success", provider="openrouter")
|
||||
return api_key
|
||||
except Exception as e:
|
||||
io.tool_error(f"Successfully obtained key, but failed to save it to file: {e}")
|
||||
io.tool_warning("Set OPENROUTER_API_KEY environment variable for this session only.")
|
||||
# Still return the key for the current session even if saving failed
|
||||
analytics.event("oauth_flow_save_failed", provider="openrouter", reason=str(e))
|
||||
return api_key
|
||||
else:
|
||||
io.tool_error("Authentication with OpenRouter failed.")
|
||||
analytics.event("oauth_flow_failed", provider="openrouter", reason="code_exchange_failed")
|
||||
return None
|
||||
|
||||
|
||||
# Dummy Analytics class for testing
|
||||
class DummyAnalytics:
|
||||
def event(self, *args, **kwargs):
|
||||
# print(f"Analytics Event: {args} {kwargs}") # Optional: print events
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to test the OpenRouter OAuth flow."""
|
||||
print("Starting OpenRouter OAuth flow test...")
|
||||
|
||||
# Use a real IO object for interaction
|
||||
io = InputOutput(
|
||||
pretty=True,
|
||||
yes=False,
|
||||
input_history_file=None,
|
||||
chat_history_file=None,
|
||||
tool_output_color="BLUE",
|
||||
tool_error_color="RED",
|
||||
)
|
||||
# Use a dummy analytics object
|
||||
analytics = DummyAnalytics()
|
||||
|
||||
# Ensure OPENROUTER_API_KEY is not set, to trigger the flow naturally
|
||||
# (though start_openrouter_oauth_flow doesn't check this itself)
|
||||
if "OPENROUTER_API_KEY" in os.environ:
|
||||
print("Warning: OPENROUTER_API_KEY is already set in environment.")
|
||||
# del os.environ["OPENROUTER_API_KEY"] # Optionally unset it for testing
|
||||
|
||||
api_key = start_openrouter_oauth_flow(io, analytics)
|
||||
|
||||
if api_key:
|
||||
print("\nOAuth flow completed successfully!")
|
||||
print(f"Obtained API Key (first 5 chars): {api_key[:5]}...")
|
||||
# Be careful printing the key, even partially
|
||||
else:
|
||||
print("\nOAuth flow failed or was cancelled.")
|
||||
|
||||
print("\nOpenRouter OAuth flow test finished.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
128
aider/openrouter.py
Normal file
128
aider/openrouter.py
Normal file
@@ -0,0 +1,128 @@
|
||||
"""
|
||||
OpenRouter model metadata caching and lookup.
|
||||
|
||||
This module keeps a local cached copy of the OpenRouter model list
|
||||
(downloaded from ``https://openrouter.ai/api/v1/models``) and exposes a
|
||||
helper class that returns metadata for a given model in a format compatible
|
||||
with litellm’s ``get_model_info``.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
def _cost_per_token(val: str | None) -> float | None:
|
||||
"""Convert a per-million price string to a per-token float."""
|
||||
if val in (None, "", "0"):
|
||||
return 0.0 if val == "0" else None
|
||||
try:
|
||||
return float(val) / 1_000_000
|
||||
except Exception: # noqa: BLE001
|
||||
return None
|
||||
|
||||
|
||||
class OpenRouterModelManager:
|
||||
MODELS_URL = "https://openrouter.ai/api/v1/models"
|
||||
CACHE_TTL = 60 * 60 * 24 # 24 h
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.cache_dir = Path.home() / ".aider" / "caches"
|
||||
self.cache_file = self.cache_dir / "openrouter_models.json"
|
||||
self.content: Dict | None = None
|
||||
self.verify_ssl: bool = True
|
||||
self._cache_loaded = False
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Public API #
|
||||
# ------------------------------------------------------------------ #
|
||||
def set_verify_ssl(self, verify_ssl: bool) -> None:
|
||||
"""Enable/disable SSL verification for API requests."""
|
||||
self.verify_ssl = verify_ssl
|
||||
|
||||
def get_model_info(self, model: str) -> Dict:
|
||||
"""
|
||||
Return metadata for *model* or an empty ``dict`` when unknown.
|
||||
|
||||
``model`` should use the aider naming convention, e.g.
|
||||
``openrouter/nousresearch/deephermes-3-mistral-24b-preview:free``.
|
||||
"""
|
||||
self._ensure_content()
|
||||
if not self.content or "data" not in self.content:
|
||||
return {}
|
||||
|
||||
route = self._strip_prefix(model)
|
||||
|
||||
# Consider both the exact id and id without any “:suffix”.
|
||||
candidates = {route}
|
||||
if ":" in route:
|
||||
candidates.add(route.split(":", 1)[0])
|
||||
|
||||
record = next((item for item in self.content["data"] if item.get("id") in candidates), None)
|
||||
if not record:
|
||||
return {}
|
||||
|
||||
context_len = (
|
||||
record.get("top_provider", {}).get("context_length")
|
||||
or record.get("context_length")
|
||||
or None
|
||||
)
|
||||
|
||||
pricing = record.get("pricing", {})
|
||||
return {
|
||||
"max_input_tokens": context_len,
|
||||
"max_tokens": context_len,
|
||||
"max_output_tokens": context_len,
|
||||
"input_cost_per_token": _cost_per_token(pricing.get("prompt")),
|
||||
"output_cost_per_token": _cost_per_token(pricing.get("completion")),
|
||||
"litellm_provider": "openrouter",
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Internal helpers #
|
||||
# ------------------------------------------------------------------ #
|
||||
def _strip_prefix(self, model: str) -> str:
|
||||
return model[len("openrouter/") :] if model.startswith("openrouter/") else model
|
||||
|
||||
def _ensure_content(self) -> None:
|
||||
self._load_cache()
|
||||
if not self.content:
|
||||
self._update_cache()
|
||||
|
||||
def _load_cache(self) -> None:
|
||||
if self._cache_loaded:
|
||||
return
|
||||
try:
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
if self.cache_file.exists():
|
||||
cache_age = time.time() - self.cache_file.stat().st_mtime
|
||||
if cache_age < self.CACHE_TTL:
|
||||
try:
|
||||
self.content = json.loads(self.cache_file.read_text())
|
||||
except json.JSONDecodeError:
|
||||
self.content = None
|
||||
except OSError:
|
||||
# Cache directory might be unwritable; ignore.
|
||||
pass
|
||||
|
||||
self._cache_loaded = True
|
||||
|
||||
def _update_cache(self) -> None:
|
||||
try:
|
||||
response = requests.get(self.MODELS_URL, timeout=10, verify=self.verify_ssl)
|
||||
if response.status_code == 200:
|
||||
self.content = response.json()
|
||||
try:
|
||||
self.cache_file.write_text(json.dumps(self.content, indent=2))
|
||||
except OSError:
|
||||
pass # Non-fatal if we can’t write the cache
|
||||
except Exception as ex: # noqa: BLE001
|
||||
print(f"Failed to fetch OpenRouter model list: {ex}")
|
||||
try:
|
||||
self.cache_file.write_text("{}")
|
||||
except OSError:
|
||||
pass
|
||||
@@ -13,11 +13,13 @@ Generate a one-line commit message for those changes.
|
||||
The commit message should be structured as follows: <type>: <description>
|
||||
Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf, test
|
||||
|
||||
Ensure the commit message:
|
||||
Ensure the commit message:{language_instruction}
|
||||
- Starts with the appropriate prefix.
|
||||
- Is in the imperative mood (e.g., \"Add feature\" not \"Added feature\" or \"Adding feature\").
|
||||
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
|
||||
- Does not exceed 72 characters.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, or line breaks.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, \
|
||||
or line breaks.
|
||||
"""
|
||||
|
||||
115
aider/queries/tree-sitter-language-pack/ocaml-tags.scm
Normal file
115
aider/queries/tree-sitter-language-pack/ocaml-tags.scm
Normal file
@@ -0,0 +1,115 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition (module_binding (module_name) @name.definition.module) @definition.module)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name.reference.module) @reference.module
|
||||
|
||||
; Module types
|
||||
;--------------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name.definition.interface) @definition.interface
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name.reference.implementation) @reference.implementation
|
||||
|
||||
; Functions
|
||||
;----------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
[
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
(parameter))
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
body: [(fun_expression) (function_expression)])
|
||||
] @definition.function
|
||||
)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name.definition.function) @definition.function
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(application_expression
|
||||
function: (value_path (value_name) @name.reference.call)) @reference.call
|
||||
|
||||
(infix_expression
|
||||
left: (value_path (value_name) @name.reference.call)
|
||||
operator: (concat_operator) @reference.call
|
||||
(#eq? @reference.call "@@"))
|
||||
|
||||
(infix_expression
|
||||
operator: (rel_operator) @reference.call
|
||||
right: (value_path (value_name) @name.reference.call)
|
||||
(#eq? @reference.call "|>"))
|
||||
|
||||
; Operator
|
||||
;---------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
(let_binding
|
||||
pattern: (parenthesized_operator (_) @name.definition.function)) @definition.function)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(prefix_operator)
|
||||
(sign_operator)
|
||||
(pow_operator)
|
||||
(mult_operator)
|
||||
(add_operator)
|
||||
(concat_operator)
|
||||
(rel_operator)
|
||||
(and_operator)
|
||||
(or_operator)
|
||||
(assign_operator)
|
||||
(hash_operator)
|
||||
(indexing_operator)
|
||||
(let_operator)
|
||||
(let_and_operator)
|
||||
(match_operator)
|
||||
] @name.reference.call @reference.call
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition (class_binding (class_name) @name.definition.class) @definition.class)
|
||||
(class_type_definition (class_type_binding (class_type_name) @name.definition.class) @definition.class)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name.reference.class)
|
||||
(class_type_path (class_type_name) @name.reference.class)
|
||||
] @reference.class
|
||||
|
||||
; Methods
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name.definition.method) @definition.method
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name.reference.call) @reference.call
|
||||
@@ -0,0 +1,98 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
98
aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
Normal file
98
aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
Normal file
@@ -0,0 +1,98 @@
|
||||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
65
aider/queries/tree-sitter-languages/scala-tags.scm
Normal file
65
aider/queries/tree-sitter-languages/scala-tags.scm
Normal file
@@ -0,0 +1,65 @@
|
||||
; Definitions
|
||||
|
||||
(package_clause
|
||||
name: (package_identifier) @name.definition.module) @definition.module
|
||||
|
||||
(trait_definition
|
||||
name: (identifier) @name.definition.interface) @definition.interface
|
||||
|
||||
(enum_definition
|
||||
name: (identifier) @name.definition.enum) @definition.enum
|
||||
|
||||
(simple_enum_case
|
||||
name: (identifier) @name.definition.class) @definition.class
|
||||
|
||||
(full_enum_case
|
||||
name: (identifier) @name.definition.class) @definition.class
|
||||
|
||||
(class_definition
|
||||
name: (identifier) @name.definition.class) @definition.class
|
||||
|
||||
(object_definition
|
||||
name: (identifier) @name.definition.object) @definition.object
|
||||
|
||||
(function_definition
|
||||
name: (identifier) @name.definition.function) @definition.function
|
||||
|
||||
(val_definition
|
||||
pattern: (identifier) @name.definition.variable) @definition.variable
|
||||
|
||||
(given_definition
|
||||
name: (identifier) @name.definition.variable) @definition.variable
|
||||
|
||||
(var_definition
|
||||
pattern: (identifier) @name.definition.variable) @definition.variable
|
||||
|
||||
(val_declaration
|
||||
name: (identifier) @name.definition.variable) @definition.variable
|
||||
|
||||
(var_declaration
|
||||
name: (identifier) @name.definition.variable) @definition.variable
|
||||
|
||||
(type_definition
|
||||
name: (type_identifier) @name.definition.type) @definition.type
|
||||
|
||||
(class_parameter
|
||||
name: (identifier) @name.definition.property) @definition.property
|
||||
|
||||
; References
|
||||
|
||||
(call_expression
|
||||
(identifier) @name.reference.call) @reference.call
|
||||
|
||||
(instance_expression
|
||||
(type_identifier) @name.reference.interface) @reference.interface
|
||||
|
||||
(instance_expression
|
||||
(generic_type
|
||||
(type_identifier) @name.reference.interface)) @reference.interface
|
||||
|
||||
(extends_clause
|
||||
(type_identifier) @name.reference.class) @reference.class
|
||||
|
||||
(extends_clause
|
||||
(generic_type
|
||||
(type_identifier) @name.reference.class)) @reference.class
|
||||
240
aider/repo.py
240
aider/repo.py
@@ -1,3 +1,4 @@
|
||||
import contextlib
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path, PurePosixPath
|
||||
@@ -9,6 +10,7 @@ try:
|
||||
git.exc.ODBError,
|
||||
git.exc.GitError,
|
||||
git.exc.InvalidGitRepositoryError,
|
||||
git.exc.GitCommandNotFound,
|
||||
]
|
||||
except ImportError:
|
||||
git = None
|
||||
@@ -19,6 +21,7 @@ import pathspec
|
||||
from aider import prompts, utils
|
||||
|
||||
from .dump import dump # noqa: F401
|
||||
from .waiting import WaitingSpinner
|
||||
|
||||
ANY_GIT_ERROR += [
|
||||
OSError,
|
||||
@@ -33,6 +36,19 @@ ANY_GIT_ERROR += [
|
||||
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_git_env(var_name, value, original_value):
|
||||
"""Temporarily set a Git environment variable."""
|
||||
os.environ[var_name] = value
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if original_value is not None:
|
||||
os.environ[var_name] = original_value
|
||||
elif var_name in os.environ:
|
||||
del os.environ[var_name]
|
||||
|
||||
|
||||
class GitRepo:
|
||||
repo = None
|
||||
aider_ignore_file = None
|
||||
@@ -56,6 +72,8 @@ class GitRepo:
|
||||
attribute_commit_message_committer=False,
|
||||
commit_prompt=None,
|
||||
subtree_only=False,
|
||||
git_commit_verify=True,
|
||||
attribute_co_authored_by=False, # Added parameter
|
||||
):
|
||||
self.io = io
|
||||
self.models = models
|
||||
@@ -67,8 +85,10 @@ class GitRepo:
|
||||
self.attribute_committer = attribute_committer
|
||||
self.attribute_commit_message_author = attribute_commit_message_author
|
||||
self.attribute_commit_message_committer = attribute_commit_message_committer
|
||||
self.attribute_co_authored_by = attribute_co_authored_by # Assign from parameter
|
||||
self.commit_prompt = commit_prompt
|
||||
self.subtree_only = subtree_only
|
||||
self.git_commit_verify = git_commit_verify
|
||||
self.ignore_file_cache = {}
|
||||
|
||||
if git_dname:
|
||||
@@ -108,7 +128,76 @@ class GitRepo:
|
||||
if aider_ignore_file:
|
||||
self.aider_ignore_file = Path(aider_ignore_file)
|
||||
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False):
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False, coder=None):
|
||||
"""
|
||||
Commit the specified files or all dirty files if none are specified.
|
||||
|
||||
Args:
|
||||
fnames (list, optional): List of filenames to commit. Defaults to None (commit all
|
||||
dirty files).
|
||||
context (str, optional): Context for generating commit message. Defaults to None.
|
||||
message (str, optional): Explicit commit message. Defaults to None (generate message).
|
||||
aider_edits (bool, optional): Whether the changes were made by Aider. Defaults to False.
|
||||
This affects attribution logic.
|
||||
coder (Coder, optional): The Coder instance, used for config and model info.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
tuple(str, str) or None: The commit hash and commit message if successful,
|
||||
else None.
|
||||
|
||||
Attribution Logic:
|
||||
------------------
|
||||
This method handles Git commit attribution based on configuration flags and whether
|
||||
Aider generated the changes (`aider_edits`).
|
||||
|
||||
Key Concepts:
|
||||
- Author: The person who originally wrote the code changes.
|
||||
- Committer: The person who last applied the commit to the repository.
|
||||
- aider_edits=True: Changes were generated by Aider (LLM).
|
||||
- aider_edits=False: Commit is user-driven (e.g., /commit manually staged changes).
|
||||
- Explicit Setting: A flag (--attribute-...) is set to True or False
|
||||
via command line or config file.
|
||||
- Implicit Default: A flag is not explicitly set, defaulting to None in args, which is
|
||||
interpreted as True unless overridden by other logic.
|
||||
|
||||
Flags:
|
||||
- --attribute-author: Modify Author name to "User Name (aider)".
|
||||
- --attribute-committer: Modify Committer name to "User Name (aider)".
|
||||
- --attribute-co-authored-by: Add
|
||||
"Co-authored-by: aider (<model>) <noreply@aider.chat>" trailer to commit message.
|
||||
|
||||
Behavior Summary:
|
||||
|
||||
1. When aider_edits = True (AI Changes):
|
||||
- If --attribute-co-authored-by=True:
|
||||
- Co-authored-by trailer IS ADDED.
|
||||
- Author/Committer names are NOT modified by default (co-authored-by takes precedence).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY True, the
|
||||
respective name IS modified (explicit overrides precedence).
|
||||
- If --attribute-co-authored-by=False:
|
||||
- Co-authored-by trailer is NOT added.
|
||||
- Author/Committer names ARE modified by default (implicit True).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY False,
|
||||
the respective name is NOT modified.
|
||||
|
||||
2. When aider_edits = False (User Changes):
|
||||
- --attribute-co-authored-by is IGNORED (trailer never added).
|
||||
- Author name is NEVER modified (--attribute-author ignored).
|
||||
- Committer name IS modified by default (implicit True, as Aider runs `git commit`).
|
||||
- EXCEPTION: If --attribute-committer is EXPLICITLY False, the name is NOT modified.
|
||||
|
||||
Resulting Scenarios:
|
||||
- Standard AI edit (defaults): Co-authored-by=False -> Author=You(aider),
|
||||
Committer=You(aider)
|
||||
- AI edit with Co-authored-by (default): Co-authored-by=True -> Author=You,
|
||||
Committer=You, Trailer added
|
||||
- AI edit with Co-authored-by + Explicit Author: Co-authored-by=True,
|
||||
--attribute-author -> Author=You(aider), Committer=You, Trailer added
|
||||
- User commit (defaults): aider_edits=False -> Author=You, Committer=You(aider)
|
||||
- User commit with explicit no-committer: aider_edits=False,
|
||||
--no-attribute-committer -> Author=You, Committer=You
|
||||
"""
|
||||
if not fnames and not self.repo.is_dirty():
|
||||
return
|
||||
|
||||
@@ -119,21 +208,75 @@ class GitRepo:
|
||||
if message:
|
||||
commit_message = message
|
||||
else:
|
||||
commit_message = self.get_commit_message(diffs, context)
|
||||
user_language = None
|
||||
if coder:
|
||||
user_language = coder.get_user_language()
|
||||
commit_message = self.get_commit_message(diffs, context, user_language)
|
||||
|
||||
if aider_edits and self.attribute_commit_message_author:
|
||||
commit_message = "aider: " + commit_message
|
||||
elif self.attribute_commit_message_committer:
|
||||
commit_message = "aider: " + commit_message
|
||||
# Retrieve attribute settings, prioritizing coder.args if available
|
||||
if coder and hasattr(coder, "args"):
|
||||
attribute_author = coder.args.attribute_author
|
||||
attribute_committer = coder.args.attribute_committer
|
||||
attribute_commit_message_author = coder.args.attribute_commit_message_author
|
||||
attribute_commit_message_committer = coder.args.attribute_commit_message_committer
|
||||
attribute_co_authored_by = coder.args.attribute_co_authored_by
|
||||
else:
|
||||
# Fallback to self attributes (initialized from config/defaults)
|
||||
attribute_author = self.attribute_author
|
||||
attribute_committer = self.attribute_committer
|
||||
attribute_commit_message_author = self.attribute_commit_message_author
|
||||
attribute_commit_message_committer = self.attribute_commit_message_committer
|
||||
attribute_co_authored_by = self.attribute_co_authored_by
|
||||
|
||||
# Determine explicit settings (None means use default behavior)
|
||||
author_explicit = attribute_author is not None
|
||||
committer_explicit = attribute_committer is not None
|
||||
|
||||
# Determine effective settings (apply default True if not explicit)
|
||||
effective_author = True if attribute_author is None else attribute_author
|
||||
effective_committer = True if attribute_committer is None else attribute_committer
|
||||
|
||||
# Determine commit message prefixing
|
||||
prefix_commit_message = aider_edits and (
|
||||
attribute_commit_message_author or attribute_commit_message_committer
|
||||
)
|
||||
|
||||
# Determine Co-authored-by trailer
|
||||
commit_message_trailer = ""
|
||||
if aider_edits and attribute_co_authored_by:
|
||||
model_name = "unknown-model"
|
||||
if coder and hasattr(coder, "main_model") and coder.main_model.name:
|
||||
model_name = coder.main_model.name
|
||||
commit_message_trailer = (
|
||||
f"\n\nCo-authored-by: aider ({model_name}) <noreply@aider.chat>"
|
||||
)
|
||||
|
||||
# Determine if author/committer names should be modified
|
||||
# Author modification applies only to aider edits.
|
||||
# It's used if effective_author is True AND
|
||||
# (co-authored-by is False OR author was explicitly set).
|
||||
use_attribute_author = (
|
||||
aider_edits and effective_author and (not attribute_co_authored_by or author_explicit)
|
||||
)
|
||||
|
||||
# Committer modification applies regardless of aider_edits (based on tests).
|
||||
# It's used if effective_committer is True AND
|
||||
# (it's not an aider edit with co-authored-by OR committer was explicitly set).
|
||||
use_attribute_committer = effective_committer and (
|
||||
not (aider_edits and attribute_co_authored_by) or committer_explicit
|
||||
)
|
||||
|
||||
if not commit_message:
|
||||
commit_message = "(no commit message provided)"
|
||||
|
||||
full_commit_message = commit_message
|
||||
# if context:
|
||||
# full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
|
||||
if prefix_commit_message:
|
||||
commit_message = "aider: " + commit_message
|
||||
|
||||
cmd = ["-m", full_commit_message, "--no-verify"]
|
||||
full_commit_message = commit_message + commit_message_trailer
|
||||
|
||||
cmd = ["-m", full_commit_message]
|
||||
if not self.git_commit_verify:
|
||||
cmd.append("--no-verify")
|
||||
if fnames:
|
||||
fnames = [str(self.abs_root_path(fn)) for fn in fnames]
|
||||
for fname in fnames:
|
||||
@@ -147,36 +290,32 @@ class GitRepo:
|
||||
|
||||
original_user_name = self.repo.git.config("--get", "user.name")
|
||||
original_committer_name_env = os.environ.get("GIT_COMMITTER_NAME")
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
committer_name = f"{original_user_name} (aider)"
|
||||
|
||||
if self.attribute_committer:
|
||||
os.environ["GIT_COMMITTER_NAME"] = committer_name
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
os.environ["GIT_AUTHOR_NAME"] = committer_name
|
||||
|
||||
try:
|
||||
self.repo.git.commit(cmd)
|
||||
commit_hash = self.get_head_commit_sha(short=True)
|
||||
self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
|
||||
return commit_hash, commit_message
|
||||
# Use context managers to handle environment variables
|
||||
with contextlib.ExitStack() as stack:
|
||||
if use_attribute_committer:
|
||||
stack.enter_context(
|
||||
set_git_env(
|
||||
"GIT_COMMITTER_NAME", committer_name, original_committer_name_env
|
||||
)
|
||||
)
|
||||
if use_attribute_author:
|
||||
stack.enter_context(
|
||||
set_git_env("GIT_AUTHOR_NAME", committer_name, original_author_name_env)
|
||||
)
|
||||
|
||||
# Perform the commit
|
||||
self.repo.git.commit(cmd)
|
||||
commit_hash = self.get_head_commit_sha(short=True)
|
||||
self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
|
||||
return commit_hash, commit_message
|
||||
|
||||
except ANY_GIT_ERROR as err:
|
||||
self.io.tool_error(f"Unable to commit: {err}")
|
||||
finally:
|
||||
# Restore the env
|
||||
|
||||
if self.attribute_committer:
|
||||
if original_committer_name_env is not None:
|
||||
os.environ["GIT_COMMITTER_NAME"] = original_committer_name_env
|
||||
else:
|
||||
del os.environ["GIT_COMMITTER_NAME"]
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
if original_author_name_env is not None:
|
||||
os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
|
||||
else:
|
||||
del os.environ["GIT_AUTHOR_NAME"]
|
||||
# No return here, implicitly returns None
|
||||
|
||||
def get_rel_repo_dir(self):
|
||||
try:
|
||||
@@ -184,7 +323,7 @@ class GitRepo:
|
||||
except (ValueError, OSError):
|
||||
return self.repo.git_dir
|
||||
|
||||
def get_commit_message(self, diffs, context):
|
||||
def get_commit_message(self, diffs, context, user_language=None):
|
||||
diffs = "# Diffs:\n" + diffs
|
||||
|
||||
content = ""
|
||||
@@ -193,6 +332,11 @@ class GitRepo:
|
||||
content += diffs
|
||||
|
||||
system_content = self.commit_prompt or prompts.commit_system
|
||||
language_instruction = ""
|
||||
if user_language:
|
||||
language_instruction = f"\n- Is written in {user_language}."
|
||||
system_content = system_content.format(language_instruction=language_instruction)
|
||||
|
||||
messages = [
|
||||
dict(role="system", content=system_content),
|
||||
dict(role="user", content=content),
|
||||
@@ -200,13 +344,15 @@ class GitRepo:
|
||||
|
||||
commit_message = None
|
||||
for model in self.models:
|
||||
num_tokens = model.token_count(messages)
|
||||
max_tokens = model.info.get("max_input_tokens") or 0
|
||||
if max_tokens and num_tokens > max_tokens:
|
||||
continue
|
||||
commit_message = model.simple_send_with_retries(messages)
|
||||
if commit_message:
|
||||
break
|
||||
spinner_text = f"Generating commit message with {model.name}"
|
||||
with WaitingSpinner(spinner_text):
|
||||
num_tokens = model.token_count(messages)
|
||||
max_tokens = model.info.get("max_input_tokens") or 0
|
||||
if max_tokens and num_tokens > max_tokens:
|
||||
continue
|
||||
commit_message = model.simple_send_with_retries(messages)
|
||||
if commit_message:
|
||||
break # Found a model that could generate the message
|
||||
|
||||
if not commit_message:
|
||||
self.io.tool_error("Failed to generate commit message!")
|
||||
@@ -289,13 +435,19 @@ class GitRepo:
|
||||
else:
|
||||
try:
|
||||
iterator = commit.tree.traverse()
|
||||
blob = None # Initialize blob
|
||||
while True:
|
||||
try:
|
||||
blob = next(iterator)
|
||||
if blob.type == "blob": # blob is a file
|
||||
files.add(blob.path)
|
||||
except IndexError:
|
||||
self.io.tool_warning(f"GitRepo: read error skipping {blob.path}")
|
||||
# Handle potential index error during tree traversal
|
||||
# without relying on potentially unassigned 'blob'
|
||||
self.io.tool_warning(
|
||||
"GitRepo: Index error encountered while reading git tree object."
|
||||
" Skipping."
|
||||
)
|
||||
continue
|
||||
except StopIteration:
|
||||
break
|
||||
|
||||
@@ -19,7 +19,7 @@ from tqdm import tqdm
|
||||
|
||||
from aider.dump import dump
|
||||
from aider.special import filter_important_files
|
||||
from aider.utils import Spinner
|
||||
from aider.waiting import Spinner
|
||||
|
||||
# tree_sitter is throwing a FutureWarning
|
||||
warnings.simplefilter("ignore", category=FutureWarning)
|
||||
@@ -35,6 +35,8 @@ CACHE_VERSION = 3
|
||||
if USING_TSL_PACK:
|
||||
CACHE_VERSION = 4
|
||||
|
||||
UPDATING_REPO_MAP_MESSAGE = "Updating repo map"
|
||||
|
||||
|
||||
class RepoMap:
|
||||
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
|
||||
@@ -380,7 +382,7 @@ class RepoMap:
|
||||
if self.verbose:
|
||||
self.io.tool_output(f"Processing {fname}")
|
||||
if progress and not showing_bar:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {fname}")
|
||||
|
||||
try:
|
||||
file_ok = Path(fname).is_file()
|
||||
@@ -398,13 +400,30 @@ class RepoMap:
|
||||
|
||||
# dump(fname)
|
||||
rel_fname = self.get_rel_fname(fname)
|
||||
current_pers = 0.0 # Start with 0 personalization score
|
||||
|
||||
if fname in chat_fnames:
|
||||
personalization[rel_fname] = personalize
|
||||
current_pers += personalize
|
||||
chat_rel_fnames.add(rel_fname)
|
||||
|
||||
if rel_fname in mentioned_fnames:
|
||||
personalization[rel_fname] = personalize
|
||||
# Use max to avoid double counting if in chat_fnames and mentioned_fnames
|
||||
current_pers = max(current_pers, personalize)
|
||||
|
||||
# Check path components against mentioned_idents
|
||||
path_obj = Path(rel_fname)
|
||||
path_components = set(path_obj.parts)
|
||||
basename_with_ext = path_obj.name
|
||||
basename_without_ext, _ = os.path.splitext(basename_with_ext)
|
||||
components_to_check = path_components.union({basename_with_ext, basename_without_ext})
|
||||
|
||||
matched_idents = components_to_check.intersection(mentioned_idents)
|
||||
if matched_idents:
|
||||
# Add personalization *once* if any path component matches a mentioned ident
|
||||
current_pers += personalize
|
||||
|
||||
if current_pers > 0:
|
||||
personalization[rel_fname] = current_pers # Assign the final calculated value
|
||||
|
||||
tags = list(self.get_tags(fname, rel_fname))
|
||||
if tags is None:
|
||||
@@ -442,15 +461,22 @@ class RepoMap:
|
||||
|
||||
for ident in idents:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {ident}")
|
||||
|
||||
definers = defines[ident]
|
||||
|
||||
mul = 1.0
|
||||
|
||||
is_snake = ("_" in ident) and any(c.isalpha() for c in ident)
|
||||
is_camel = any(c.isupper() for c in ident) and any(c.islower() for c in ident)
|
||||
if ident in mentioned_idents:
|
||||
mul = 10
|
||||
elif ident.startswith("_"):
|
||||
mul = 0.1
|
||||
else:
|
||||
mul = 1
|
||||
mul *= 10
|
||||
if (is_snake or is_camel) and len(ident) >= 8:
|
||||
mul *= 10
|
||||
if ident.startswith("_"):
|
||||
mul *= 0.1
|
||||
if len(defines[ident]) > 5:
|
||||
mul *= 0.1
|
||||
|
||||
for referencer, num_refs in Counter(references[ident]).items():
|
||||
for definer in definers:
|
||||
@@ -458,10 +484,14 @@ class RepoMap:
|
||||
# if referencer == definer:
|
||||
# continue
|
||||
|
||||
use_mul = mul
|
||||
if referencer in chat_rel_fnames:
|
||||
use_mul *= 50
|
||||
|
||||
# scale down so high freq (low value) mentions don't dominate
|
||||
num_refs = math.sqrt(num_refs)
|
||||
|
||||
G.add_edge(referencer, definer, weight=mul * num_refs, ident=ident)
|
||||
G.add_edge(referencer, definer, weight=use_mul * num_refs, ident=ident)
|
||||
|
||||
if not references:
|
||||
pass
|
||||
@@ -484,7 +514,7 @@ class RepoMap:
|
||||
ranked_definitions = defaultdict(float)
|
||||
for src in G.nodes:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {src}")
|
||||
|
||||
src_rank = ranked[src]
|
||||
total_weight = sum(data["weight"] for _src, _dst, data in G.out_edges(src, data=True))
|
||||
@@ -593,7 +623,7 @@ class RepoMap:
|
||||
if not mentioned_idents:
|
||||
mentioned_idents = set()
|
||||
|
||||
spin = Spinner("Updating repo map")
|
||||
spin = Spinner(UPDATING_REPO_MAP_MESSAGE)
|
||||
|
||||
ranked_tags = self.get_ranked_tags(
|
||||
chat_fnames,
|
||||
@@ -627,7 +657,11 @@ class RepoMap:
|
||||
while lower_bound <= upper_bound:
|
||||
# dump(lower_bound, middle, upper_bound)
|
||||
|
||||
spin.step()
|
||||
if middle > 1500:
|
||||
show_tokens = f"{middle / 1000.0:.1f}K"
|
||||
else:
|
||||
show_tokens = str(middle)
|
||||
spin.step(f"{UPDATING_REPO_MAP_MESSAGE}: {show_tokens} tokens")
|
||||
|
||||
tree = self.to_tree(ranked_tags[:middle], chat_rel_fnames)
|
||||
num_tokens = self.token_count(tree)
|
||||
|
||||
@@ -15,22 +15,6 @@
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-r1": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.00000055,
|
||||
"input_cost_per_token_cache_hit": 0.00000014,
|
||||
"cache_read_input_token_cost": 0.00000014,
|
||||
"cache_creation_input_token_cost": 0.0,
|
||||
"output_cost_per_token": 0.00000219,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
//"supports_function_calling": true,
|
||||
"supports_assistant_prefill": true,
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-r1:free": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
@@ -63,6 +47,33 @@
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-chat-v3-0324": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.00000055,
|
||||
"input_cost_per_token_cache_hit": 0.00000014,
|
||||
"cache_read_input_token_cost": 0.00000014,
|
||||
"cache_creation_input_token_cost": 0.0,
|
||||
"output_cost_per_token": 0.00000219,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
//"supports_function_calling": true,
|
||||
"supports_assistant_prefill": true,
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-chat-v3-0324:free": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"litellm_provider": "openrouter",
|
||||
"supports_prompt_caching": true,
|
||||
"mode": "chat",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-r1": {
|
||||
"max_tokens": 160000,
|
||||
"max_input_tokens": 128000,
|
||||
@@ -72,8 +83,8 @@
|
||||
"output_cost_per_token": 0.000008,
|
||||
"mode": "chat",
|
||||
},
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-v3": {
|
||||
"max_tokens": 128000,
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": {
|
||||
"max_tokens": 160000,
|
||||
"max_input_tokens": 100000,
|
||||
"max_output_tokens": 8192,
|
||||
"litellm_provider": "fireworks_ai",
|
||||
@@ -81,53 +92,25 @@
|
||||
"output_cost_per_token": 0.0000009,
|
||||
"mode": "chat",
|
||||
},
|
||||
"o3-mini": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openai/o3-mini": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"openrouter/openrouter/quasar-alpha": {
|
||||
"max_input_tokens": 1000000,
|
||||
"max_output_tokens": 32000,
|
||||
"input_cost_per_token": 0.0,
|
||||
"output_cost_per_token": 0.0,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/openai/o3-mini-high": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"openrouter/openrouter/optimus-alpha": {
|
||||
"max_input_tokens": 1000000,
|
||||
"max_output_tokens": 32000,
|
||||
"input_cost_per_token": 0.0,
|
||||
"output_cost_per_token": 0.0,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/openai/gpt-4o-mini": {
|
||||
"max_tokens": 16384,
|
||||
@@ -147,26 +130,6 @@
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true
|
||||
},
|
||||
"claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "anthropic",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"anthropic/claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
@@ -187,43 +150,6 @@
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openrouter/anthropic/claude-3.7-sonnet": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"input_cost_per_token": 0.000075,
|
||||
"output_cost_per_token": 0.00015,
|
||||
"cache_read_input_token_cost": 0.0000375,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openai/gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
@@ -241,4 +167,302 @@
|
||||
"supports_system_messages": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gemini/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
//"litellm_provider": "vertex_ai-language-models",
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"vertex_ai/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"vertex_ai/gemini-2.5-pro-preview-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0.000010,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"openrouter/google/gemini-2.5-pro-preview-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0.000010,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"openrouter/google/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_audio_input": true,
|
||||
"supports_video_input": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-mini-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.0000003,
|
||||
"output_cost_per_token": 0.0000005,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-fast-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.000005,
|
||||
"output_cost_per_token": 0.000025,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/x-ai/grok-3-mini-fast-beta": {
|
||||
"max_tokens": 131072,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 131072,
|
||||
"input_cost_per_token": 0.0000006,
|
||||
"output_cost_per_token": 0.000004,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat"
|
||||
},
|
||||
"openrouter/google/gemini-2.0-flash-exp:free": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 8192,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.00000125,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_reasoning": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
|
||||
},
|
||||
"gemini/gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.0000007,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"rpm": 10000,
|
||||
"tpm": 10000000,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
|
||||
},
|
||||
"together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": {
|
||||
"input_cost_per_token": 0.0000002,
|
||||
"output_cost_per_token": 0.0000006,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,6 +185,7 @@
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-20250219
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
@@ -196,8 +197,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-latest
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
@@ -209,6 +212,7 @@
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
@@ -222,8 +226,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: claude-3-7-sonnet-latest
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
@@ -235,8 +241,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
@@ -248,8 +256,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
@@ -261,8 +271,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
@@ -274,8 +286,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
@@ -287,8 +301,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
@@ -297,8 +313,10 @@
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
@@ -307,8 +325,10 @@
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
@@ -320,8 +340,10 @@
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet:beta
|
||||
overeager: true
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
@@ -333,6 +355,7 @@
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
|
||||
- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
edit_format: diff
|
||||
@@ -560,6 +583,16 @@
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
|
||||
- name: openrouter/deepseek/deepseek-chat-v3-0324:free
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
caches_by_default: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
|
||||
editor_edit_format: editor-diff
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-r1:free
|
||||
editor_edit_format: editor-diff
|
||||
@@ -635,6 +668,15 @@
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/deepseek/deepseek-chat-v3-0324
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
|
||||
- name: openrouter/openai/gpt-4o
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
@@ -694,6 +736,7 @@
|
||||
streaming: false
|
||||
editor_model_name: azure/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: o1-preview
|
||||
edit_format: architect
|
||||
@@ -732,6 +775,7 @@
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: openai/o1
|
||||
edit_format: diff
|
||||
@@ -742,6 +786,7 @@
|
||||
editor_model_name: openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: o1
|
||||
edit_format: diff
|
||||
@@ -752,6 +797,7 @@
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
edit_format: diff
|
||||
@@ -771,7 +817,7 @@
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
@@ -792,6 +838,14 @@
|
||||
extra_params:
|
||||
max_tokens: 128000
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3-0324
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 160000
|
||||
|
||||
- name: openai/o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -800,7 +854,8 @@
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -809,6 +864,7 @@
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: openrouter/openai/o3-mini
|
||||
edit_format: diff
|
||||
@@ -818,6 +874,7 @@
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: openrouter/openai/o3-mini-high
|
||||
edit_format: diff
|
||||
@@ -827,6 +884,7 @@
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: azure/o3-mini
|
||||
edit_format: diff
|
||||
@@ -836,6 +894,7 @@
|
||||
editor_model_name: azure/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
|
||||
- name: gpt-4.5-preview
|
||||
edit_format: diff
|
||||
@@ -846,7 +905,7 @@
|
||||
examples_as_sys_msg: true
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
|
||||
- name: openai/gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -891,7 +950,489 @@
|
||||
|
||||
- name: gemini/gemma-3-27b-it
|
||||
use_system_prompt: false
|
||||
|
||||
|
||||
- name: openrouter/google/gemma-3-27b-it:free
|
||||
use_system_prompt: false
|
||||
|
||||
|
||||
- name: openrouter/google/gemma-3-27b-it
|
||||
use_system_prompt: false
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-03-25
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: gemini/gemini-2.0-flash
|
||||
|
||||
- name: gemini/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
overeager: true
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/openrouter/quasar-alpha
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/x-ai/grok-3-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: xai/grok-3-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: openrouter/x-ai/grok-3-mini-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: xai/grok-3-mini-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: low
|
||||
|
||||
- name: openrouter/x-ai/grok-3-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: xai/grok-3-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
|
||||
- name: openrouter/x-ai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: xai/grok-3-mini-fast-beta
|
||||
use_repo_map: true
|
||||
edit_format: whole
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: openrouter/openrouter/optimus-alpha
|
||||
use_repo_map: true
|
||||
edit_format: diff
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys # user: 52.x%/96.9%
|
||||
examples_as_sys_msg: false # true: 51.6% correct, 95.6% well formed; false: 52.4%/98.2%
|
||||
editor_model_name: gpt-4.1-mini
|
||||
|
||||
- name: openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: openai/gpt-4.1-mini
|
||||
|
||||
- name: azure/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: azure/gpt-4.1-mini
|
||||
|
||||
- name: openrouter/openai/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1-mini
|
||||
|
||||
- name: gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false # false: 32.x%/92.4% (60+ malformed responses); true: 31.7/90.2/60+
|
||||
|
||||
- name: openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: azure/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: openrouter/openai/gpt-4.1-mini
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: false
|
||||
|
||||
- name: o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: azure/o3
|
||||
streaming: false
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/openai/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: azure/o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: azure/gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: o4-mini
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4.1-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
accepts_settings: ["reasoning_effort"]
|
||||
examples_as_sys_msg: true
|
||||
#extra_params:
|
||||
# extra_body:
|
||||
# reasoning_effort: high
|
||||
|
||||
- name: gemini/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-03-25
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-05-06
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-05-06
|
||||
overeager: true
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
|
||||
#- name: openrouter/qwen/qwen3-235b-a22b
|
||||
# system_prompt_prefix: "/no_think"
|
||||
# use_temperature: 0.7
|
||||
# extra_params:
|
||||
# max_tokens: 24000
|
||||
# top_p: 0.8
|
||||
# top_k: 20
|
||||
# min_p: 0.0
|
||||
# temperature: 0.7
|
||||
# extra_body:
|
||||
# provider:
|
||||
# order: ["Together"]
|
||||
|
||||
#- name: together_ai/Qwen/Qwen3-235B-A22B-fp8-tput
|
||||
# system_prompt_prefix: "/no_think"
|
||||
# use_temperature: 0.7
|
||||
# reasoning_tag: think
|
||||
# extra_params:
|
||||
# max_tokens: 24000
|
||||
# top_p: 0.8
|
||||
# top_k: 20
|
||||
# min_p: 0.0
|
||||
# temperature: 0.7
|
||||
|
||||
@@ -14,7 +14,7 @@ aider_user_agent = f"Aider/{__version__} +{urls.website}"
|
||||
# platforms.
|
||||
|
||||
|
||||
def install_playwright(io):
|
||||
def check_env():
|
||||
try:
|
||||
from playwright.sync_api import sync_playwright
|
||||
|
||||
@@ -29,6 +29,16 @@ def install_playwright(io):
|
||||
except Exception:
|
||||
has_chromium = False
|
||||
|
||||
return has_pip, has_chromium
|
||||
|
||||
|
||||
def has_playwright():
|
||||
has_pip, has_chromium = check_env()
|
||||
return has_pip and has_chromium
|
||||
|
||||
|
||||
def install_playwright(io):
|
||||
has_pip, has_chromium = check_env()
|
||||
if has_pip and has_chromium:
|
||||
return True
|
||||
|
||||
@@ -159,7 +169,8 @@ class Scraper:
|
||||
try:
|
||||
response = page.goto(url, wait_until="networkidle", timeout=5000)
|
||||
except PlaywrightTimeoutError:
|
||||
self.print_error(f"Timeout while loading {url}")
|
||||
print(f"Page didn't quiesce, scraping content anyway: {url}")
|
||||
response = None
|
||||
except PlaywrightError as e:
|
||||
self.print_error(f"Error navigating to {url}: {str(e)}")
|
||||
return None, None
|
||||
@@ -261,7 +272,7 @@ def slimdown_html(soup):
|
||||
|
||||
|
||||
def main(url):
|
||||
scraper = Scraper()
|
||||
scraper = Scraper(playwright_available=has_playwright())
|
||||
content = scraper.scrape(url)
|
||||
print(content)
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
import itertools
|
||||
import os
|
||||
import platform
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import oslex
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.waiting import Spinner
|
||||
|
||||
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}
|
||||
|
||||
@@ -250,55 +250,6 @@ def run_install(cmd):
|
||||
return False, output
|
||||
|
||||
|
||||
class Spinner:
|
||||
unicode_spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
|
||||
ascii_spinner = ["|", "/", "-", "\\"]
|
||||
|
||||
def __init__(self, text):
|
||||
self.text = text
|
||||
self.start_time = time.time()
|
||||
self.last_update = 0
|
||||
self.visible = False
|
||||
self.is_tty = sys.stdout.isatty()
|
||||
self.tested = False
|
||||
|
||||
def test_charset(self):
|
||||
if self.tested:
|
||||
return
|
||||
self.tested = True
|
||||
# Try unicode first, fall back to ascii if needed
|
||||
try:
|
||||
# Test if we can print unicode characters
|
||||
print(self.unicode_spinner[0], end="", flush=True)
|
||||
print("\r", end="", flush=True)
|
||||
self.spinner_chars = itertools.cycle(self.unicode_spinner)
|
||||
except UnicodeEncodeError:
|
||||
self.spinner_chars = itertools.cycle(self.ascii_spinner)
|
||||
|
||||
def step(self):
|
||||
if not self.is_tty:
|
||||
return
|
||||
|
||||
current_time = time.time()
|
||||
if not self.visible and current_time - self.start_time >= 0.5:
|
||||
self.visible = True
|
||||
self._step()
|
||||
elif self.visible and current_time - self.last_update >= 0.1:
|
||||
self._step()
|
||||
self.last_update = current_time
|
||||
|
||||
def _step(self):
|
||||
if not self.visible:
|
||||
return
|
||||
|
||||
self.test_charset()
|
||||
print(f"\r{self.text} {next(self.spinner_chars)}\r{self.text} ", end="", flush=True)
|
||||
|
||||
def end(self):
|
||||
if self.visible and self.is_tty:
|
||||
print("\r" + " " * (len(self.text) + 3))
|
||||
|
||||
|
||||
def find_common_root(abs_fnames):
|
||||
try:
|
||||
if len(abs_fnames) == 1:
|
||||
@@ -308,7 +259,11 @@ def find_common_root(abs_fnames):
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
return safe_abs_path(os.getcwd())
|
||||
try:
|
||||
return safe_abs_path(os.getcwd())
|
||||
except FileNotFoundError:
|
||||
# Fallback if cwd is deleted
|
||||
return "."
|
||||
|
||||
|
||||
def format_tokens(count):
|
||||
@@ -380,19 +335,4 @@ def printable_shell_command(cmd_list):
|
||||
Returns:
|
||||
str: Shell-escaped command string.
|
||||
"""
|
||||
if platform.system() == "Windows":
|
||||
return subprocess.list2cmdline(cmd_list)
|
||||
else:
|
||||
return shlex.join(cmd_list)
|
||||
|
||||
|
||||
def main():
|
||||
spinner = Spinner("Running spinner...")
|
||||
for _ in range(40): # 40 steps * 0.25 seconds = 10 seconds
|
||||
time.sleep(0.25)
|
||||
spinner.step()
|
||||
spinner.end()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
return oslex.join(cmd_list)
|
||||
|
||||
221
aider/waiting.py
Normal file
221
aider/waiting.py
Normal file
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Thread-based, killable spinner utility.
|
||||
|
||||
Use it like:
|
||||
|
||||
from aider.waiting import WaitingSpinner
|
||||
|
||||
spinner = WaitingSpinner("Waiting for LLM")
|
||||
spinner.start()
|
||||
... # long task
|
||||
spinner.stop()
|
||||
"""
|
||||
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
|
||||
class Spinner:
|
||||
"""
|
||||
Minimal spinner that scans a single marker back and forth across a line.
|
||||
|
||||
The animation is pre-rendered into a list of frames. If the terminal
|
||||
cannot display unicode the frames are converted to plain ASCII.
|
||||
"""
|
||||
|
||||
last_frame_idx = 0 # Class variable to store the last frame index
|
||||
|
||||
def __init__(self, text: str, width: int = 7):
|
||||
self.text = text
|
||||
self.start_time = time.time()
|
||||
self.last_update = 0.0
|
||||
self.visible = False
|
||||
self.is_tty = sys.stdout.isatty()
|
||||
self.console = Console()
|
||||
|
||||
# Pre-render the animation frames using pure ASCII so they will
|
||||
# always display, even on very limited terminals.
|
||||
ascii_frames = [
|
||||
"#= ", # C1 C2 space(8)
|
||||
"=# ", # C2 C1 space(8)
|
||||
" =# ", # space(1) C2 C1 space(7)
|
||||
" =# ", # space(2) C2 C1 space(6)
|
||||
" =# ", # space(3) C2 C1 space(5)
|
||||
" =# ", # space(4) C2 C1 space(4)
|
||||
" =# ", # space(5) C2 C1 space(3)
|
||||
" =# ", # space(6) C2 C1 space(2)
|
||||
" =# ", # space(7) C2 C1 space(1)
|
||||
" =#", # space(8) C2 C1
|
||||
" #=", # space(8) C1 C2
|
||||
" #= ", # space(7) C1 C2 space(1)
|
||||
" #= ", # space(6) C1 C2 space(2)
|
||||
" #= ", # space(5) C1 C2 space(3)
|
||||
" #= ", # space(4) C1 C2 space(4)
|
||||
" #= ", # space(3) C1 C2 space(5)
|
||||
" #= ", # space(2) C1 C2 space(6)
|
||||
" #= ", # space(1) C1 C2 space(7)
|
||||
]
|
||||
|
||||
self.unicode_palette = "░█"
|
||||
xlate_from, xlate_to = ("=#", self.unicode_palette)
|
||||
|
||||
# If unicode is supported, swap the ASCII chars for nicer glyphs.
|
||||
if self._supports_unicode():
|
||||
translation_table = str.maketrans(xlate_from, xlate_to)
|
||||
frames = [f.translate(translation_table) for f in ascii_frames]
|
||||
self.scan_char = xlate_to[xlate_from.find("#")]
|
||||
else:
|
||||
frames = ascii_frames
|
||||
self.scan_char = "#"
|
||||
|
||||
# Bounce the scanner back and forth.
|
||||
self.frames = frames
|
||||
self.frame_idx = Spinner.last_frame_idx # Initialize from class variable
|
||||
self.width = len(frames[0]) - 2 # number of chars between the brackets
|
||||
self.animation_len = len(frames[0])
|
||||
self.last_display_len = 0 # Length of the last spinner line (frame + text)
|
||||
|
||||
def _supports_unicode(self) -> bool:
|
||||
if not self.is_tty:
|
||||
return False
|
||||
try:
|
||||
out = self.unicode_palette
|
||||
out += "\b" * len(self.unicode_palette)
|
||||
out += " " * len(self.unicode_palette)
|
||||
out += "\b" * len(self.unicode_palette)
|
||||
sys.stdout.write(out)
|
||||
sys.stdout.flush()
|
||||
return True
|
||||
except UnicodeEncodeError:
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def _next_frame(self) -> str:
|
||||
frame = self.frames[self.frame_idx]
|
||||
self.frame_idx = (self.frame_idx + 1) % len(self.frames)
|
||||
Spinner.last_frame_idx = self.frame_idx # Update class variable
|
||||
return frame
|
||||
|
||||
def step(self, text: str = None) -> None:
|
||||
if text is not None:
|
||||
self.text = text
|
||||
|
||||
if not self.is_tty:
|
||||
return
|
||||
|
||||
now = time.time()
|
||||
if not self.visible and now - self.start_time >= 0.5:
|
||||
self.visible = True
|
||||
self.last_update = 0.0
|
||||
if self.is_tty:
|
||||
self.console.show_cursor(False)
|
||||
|
||||
if not self.visible or now - self.last_update < 0.1:
|
||||
return
|
||||
|
||||
self.last_update = now
|
||||
frame_str = self._next_frame()
|
||||
|
||||
# Determine the maximum width for the spinner line
|
||||
# Subtract 2 as requested, to leave a margin or prevent cursor wrapping issues
|
||||
max_spinner_width = self.console.width - 2
|
||||
if max_spinner_width < 0: # Handle extremely narrow terminals
|
||||
max_spinner_width = 0
|
||||
|
||||
current_text_payload = f" {self.text}"
|
||||
line_to_display = f"{frame_str}{current_text_payload}"
|
||||
|
||||
# Truncate the line if it's too long for the console width
|
||||
if len(line_to_display) > max_spinner_width:
|
||||
line_to_display = line_to_display[:max_spinner_width]
|
||||
|
||||
len_line_to_display = len(line_to_display)
|
||||
|
||||
# Calculate padding to clear any remnants from a longer previous line
|
||||
padding_to_clear = " " * max(0, self.last_display_len - len_line_to_display)
|
||||
|
||||
# Write the spinner frame, text, and any necessary clearing spaces
|
||||
sys.stdout.write(f"\r{line_to_display}{padding_to_clear}")
|
||||
self.last_display_len = len_line_to_display
|
||||
|
||||
# Calculate number of backspaces to position cursor at the scanner character
|
||||
scan_char_abs_pos = frame_str.find(self.scan_char)
|
||||
|
||||
# Total characters written to the line (frame + text + padding)
|
||||
total_chars_written_on_line = len_line_to_display + len(padding_to_clear)
|
||||
|
||||
# num_backspaces will be non-positive if scan_char_abs_pos is beyond
|
||||
# total_chars_written_on_line (e.g., if the scan char itself was truncated).
|
||||
# (e.g., if the scan char itself was truncated).
|
||||
# In such cases, (effectively) 0 backspaces are written,
|
||||
# and the cursor stays at the end of the line.
|
||||
num_backspaces = total_chars_written_on_line - scan_char_abs_pos
|
||||
sys.stdout.write("\b" * num_backspaces)
|
||||
sys.stdout.flush()
|
||||
|
||||
def end(self) -> None:
|
||||
if self.visible and self.is_tty:
|
||||
clear_len = self.last_display_len # Use the length of the last displayed content
|
||||
sys.stdout.write("\r" + " " * clear_len + "\r")
|
||||
sys.stdout.flush()
|
||||
self.console.show_cursor(True)
|
||||
self.visible = False
|
||||
|
||||
|
||||
class WaitingSpinner:
|
||||
"""Background spinner that can be started/stopped safely."""
|
||||
|
||||
def __init__(self, text: str = "Waiting for LLM", delay: float = 0.15):
|
||||
self.spinner = Spinner(text)
|
||||
self.delay = delay
|
||||
self._stop_event = threading.Event()
|
||||
self._thread = threading.Thread(target=self._spin, daemon=True)
|
||||
|
||||
def _spin(self):
|
||||
while not self._stop_event.is_set():
|
||||
self.spinner.step()
|
||||
time.sleep(self.delay)
|
||||
self.spinner.end()
|
||||
|
||||
def start(self):
|
||||
"""Start the spinner in a background thread."""
|
||||
if not self._thread.is_alive():
|
||||
self._thread.start()
|
||||
|
||||
def stop(self):
|
||||
"""Request the spinner to stop and wait briefly for the thread to exit."""
|
||||
self._stop_event.set()
|
||||
if self._thread.is_alive():
|
||||
self._thread.join(timeout=self.delay)
|
||||
self.spinner.end()
|
||||
|
||||
# Allow use as a context-manager
|
||||
def __enter__(self):
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.stop()
|
||||
|
||||
|
||||
def main():
|
||||
spinner = Spinner("Running spinner...")
|
||||
try:
|
||||
for _ in range(100):
|
||||
time.sleep(0.15)
|
||||
spinner.step()
|
||||
print("Success!")
|
||||
except KeyboardInterrupt:
|
||||
print("\nInterrupted by user.")
|
||||
finally:
|
||||
spinner.end()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -34,6 +34,8 @@ def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:
|
||||
"__pycache__/", # Python cache dir
|
||||
".DS_Store", # macOS metadata
|
||||
"Thumbs.db", # Windows thumbnail cache
|
||||
"*.svg",
|
||||
"*.pdf",
|
||||
# IDE files
|
||||
".idea/", # JetBrains IDEs
|
||||
".vscode/", # VS Code
|
||||
@@ -64,7 +66,9 @@ class FileWatcher:
|
||||
"""Watches source files for changes and AI comments"""
|
||||
|
||||
# Compiled regex pattern for AI comments
|
||||
ai_comment_pattern = re.compile(r"(?:#|//|--) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
|
||||
ai_comment_pattern = re.compile(
|
||||
r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE
|
||||
)
|
||||
|
||||
def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None):
|
||||
self.coder = coder
|
||||
@@ -93,15 +97,19 @@ class FileWatcher:
|
||||
|
||||
rel_path = path_abs.relative_to(self.root)
|
||||
if self.verbose:
|
||||
dump(rel_path)
|
||||
print("Changed", rel_path)
|
||||
|
||||
if self.gitignore_spec and self.gitignore_spec.match_file(
|
||||
rel_path.as_posix() + ("/" if path_abs.is_dir() else "")
|
||||
):
|
||||
return False
|
||||
|
||||
# Check file size before reading content
|
||||
if path_abs.is_file() and path_abs.stat().st_size > 1 * 1024 * 1024: # 1MB limit
|
||||
return False
|
||||
|
||||
if self.verbose:
|
||||
dump("ok", rel_path)
|
||||
print("Checking", rel_path)
|
||||
|
||||
# Check if file contains AI markers
|
||||
try:
|
||||
@@ -262,7 +270,7 @@ class FileWatcher:
|
||||
line_nums.append(i)
|
||||
comments.append(comment)
|
||||
comment = comment.lower()
|
||||
comment = comment.lstrip("/#-")
|
||||
comment = comment.lstrip("/#-;") # Added semicolon for Lisp comments
|
||||
comment = comment.strip()
|
||||
if comment.startswith("ai!") or comment.endswith("ai!"):
|
||||
has_action = "!"
|
||||
|
||||
@@ -7,12 +7,13 @@ description: Release notes and stats on aider writing its own code.
|
||||
|
||||
# Release history
|
||||
|
||||
{% include blame.md %}
|
||||
|
||||
The above
|
||||
[stats are based on the git commit history](/docs/faq.html#how-are-the-aider-wrote-xx-of-code-stats-computed)
|
||||
Aider writes most of its own code, usually about 70-80% of the new code in each release.
|
||||
These
|
||||
[statistics are based on the git commit history](/docs/faq.html#how-are-the-aider-wrote-xx-of-code-stats-computed)
|
||||
of the aider repo.
|
||||
|
||||
{% include blame.md %}
|
||||
|
||||
## Release notes
|
||||
|
||||
<!--[[[cog
|
||||
@@ -25,6 +26,211 @@ cog.out(text)
|
||||
|
||||
### main branch
|
||||
|
||||
- Bumped configargparse to 1.7.1 as 1.7 was pulled.
|
||||
- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
|
||||
- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
|
||||
- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
|
||||
- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
|
||||
- Improved automatic commit messages by providing more context during their generation, by wangboxue.
|
||||
- Aider wrote 89% of the code in this release.
|
||||
|
||||
### Aider v0.83.1
|
||||
|
||||
- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
|
||||
- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
|
||||
- Displayed a spinner with the model name when generating commit messages.
|
||||
|
||||
### Aider v0.83.0
|
||||
|
||||
- Added support for `gemini-2.5-pro-preview-05-06` models.
|
||||
- Added support for `qwen3-235b` models.
|
||||
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
|
||||
- Added a spinner animation while waiting for the LLM to start streaming its response.
|
||||
- Updated the spinner animation to a Knight Rider style.
|
||||
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
|
||||
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
|
||||
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
|
||||
- Commit message prompt specifies the user's language.
|
||||
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
|
||||
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
|
||||
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
|
||||
- Added tracking of total tokens sent and received, now included in benchmark statistics.
|
||||
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
|
||||
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
|
||||
- Improved cost calculation using `litellm.completion_cost` where available.
|
||||
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
|
||||
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
|
||||
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
|
||||
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
|
||||
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
|
||||
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
|
||||
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
|
||||
- Improved display of filenames in the prompt header using rich Text formatting.
|
||||
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
|
||||
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
|
||||
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
|
||||
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
|
||||
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
|
||||
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
|
||||
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
|
||||
- Dropped support for Python 3.9.
|
||||
- Aider wrote 55% of the code in this release.
|
||||
|
||||
### Aider v0.82.3
|
||||
|
||||
- Add support for `gemini-2.5-flash-preview-04-17` models.
|
||||
- Improved robustness of edit block parsing when filenames start with backticks or fences.
|
||||
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
|
||||
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
|
||||
- Instruct models to reply in the user's detected system language.
|
||||
- Fix parsing of diffs for newly created files (`--- /dev/null`).
|
||||
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
|
||||
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
|
||||
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Skip scanning files larger than 1MB for AI comments (`--watch`).
|
||||
|
||||
### Aider v0.82.2
|
||||
|
||||
- Fix editing shell files with diff-fenced, by zjy1412.
|
||||
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
|
||||
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
|
||||
|
||||
### Aider v0.82.1
|
||||
|
||||
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
|
||||
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
|
||||
- Disabled streaming for `o3` models since you need identity verification to stream.
|
||||
- Fixed handling of file paths in unified diffs, especially those generated by git.
|
||||
|
||||
### Aider v0.82.0
|
||||
|
||||
- Support for GPT 4.1, mini and nano.
|
||||
- Added new `patch` edit format for OpenAI's GPT-4.1 model.
|
||||
- Improved support for using architect mode with Gemini 2.5 Pro.
|
||||
- Added new `editor-diff`, `editor-whole`, and `editor-diff-fenced` edit formats.
|
||||
- Bugfix for automatically selecting the best edit format to use in architect mode.
|
||||
- Added support for `grok-3-fast-beta` and `grok-3-mini-fast-beta` models.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.81.3
|
||||
|
||||
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
|
||||
- Updated default settings for Grok models.
|
||||
|
||||
### Aider v0.81.2
|
||||
|
||||
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
|
||||
- Add alias "grok3" for `xai/grok-3-beta`.
|
||||
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
|
||||
- Fix URL extraction from error messages.
|
||||
- Allow adding files by full path even if a file with the same basename is already in the chat.
|
||||
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
|
||||
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
|
||||
- Commit messages generated by aider are now lowercase, by Anton Ödman.
|
||||
|
||||
### Aider v0.81.1
|
||||
|
||||
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
|
||||
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
|
||||
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
|
||||
|
||||
### Aider v0.81.0
|
||||
|
||||
- Added support for the `openrouter/openrouter/quasar-alpha` model.
|
||||
- Run with `aider --model quasar`
|
||||
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
|
||||
- Prevent retrying API calls when the provider reports insufficient credits.
|
||||
- Improve URL detection to exclude trailing double quotes.
|
||||
- Aider wrote 86% of the code in this release.
|
||||
|
||||
### Aider v0.80.4
|
||||
|
||||
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
|
||||
|
||||
### Aider v0.80.3
|
||||
|
||||
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
|
||||
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
|
||||
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
|
||||
|
||||
### Aider v0.80.2
|
||||
|
||||
- Bumped deps.
|
||||
|
||||
### Aider v0.80.1
|
||||
|
||||
- Updated deps for yanked fsspec and aiohttp packages #3699
|
||||
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
|
||||
|
||||
### Aider v0.80.0
|
||||
|
||||
- OpenRouter OAuth integration:
|
||||
- Offer to OAuth against OpenRouter if no model and keys are provided.
|
||||
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
|
||||
- Prioritize `gemini/gemini-2.5-pro-exp-03-25` if `GEMINI_API_KEY` is set, and `vertex_ai/gemini-2.5-pro-exp-03-25` if `VERTEXAI_PROJECT` is set, when no model is specified.
|
||||
- Validate user-configured color settings on startup and warn/disable invalid ones.
|
||||
- Warn at startup if `--stream` and `--cache-prompts` are used together, as cost estimates may be inaccurate.
|
||||
- Boost repomap ranking for files whose path components match identifiers mentioned in the chat.
|
||||
- Change web scraping timeout from an error to a warning, allowing scraping to continue with potentially incomplete content.
|
||||
- Left-align markdown headings in the terminal output, by Peter Schilling.
|
||||
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
|
||||
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
|
||||
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
|
||||
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
|
||||
- Add repomap support for the Scala language, by Vasil Markoukin.
|
||||
- Fixed bug in `/run` that was preventing auto-testing.
|
||||
- Fix bug preventing `UnboundLocalError` during git tree traversal.
|
||||
- Handle `GitCommandNotFound` error if git is not installed or not in PATH.
|
||||
- Handle `FileNotFoundError` if the current working directory is deleted while aider is running.
|
||||
- Fix completion menu current item color styling, by Andrey Ivanov.
|
||||
- Aider wrote 87% of the code in this release.
|
||||
|
||||
### Aider v0.79.2
|
||||
|
||||
- Added 'gemini' alias for gemini-2.5-pro model.
|
||||
- Updated Gemini 2.5 Pro max output tokens to 64k.
|
||||
- Added support for Lisp-style semicolon comments in file watcher, by Matteo Landi.
|
||||
- Added OpenRouter API error detection and retries.
|
||||
- Added openrouter/deepseek-chat-v3-0324 model.
|
||||
- Aider wrote 93% of the code in this release.
|
||||
|
||||
### Aider v0.79.1
|
||||
|
||||
- Improved model listing to include all models in fuzzy matching, including those provided by aider (not litellm).
|
||||
|
||||
### Aider v0.79.0
|
||||
|
||||
- Added support for Gemini 2.5 Pro models.
|
||||
- Added support for DeepSeek V3 0324 model.
|
||||
- Added a new `/context` command that automatically identifies which files need to be edited for a given request.
|
||||
- Added `/edit` as an alias for the `/editor` command.
|
||||
- Added "overeager" mode for Claude 3.7 Sonnet models to try and keep it working within the requested scope.
|
||||
- Aider wrote 65% of the code in this release.
|
||||
|
||||
### Aider v0.78.0
|
||||
|
||||
- Added support for thinking tokens for OpenRouter Sonnet 3.7.
|
||||
- Added commands to switch between model types: `/editor-model` for Editor Model, and `/weak-model` for Weak Model, by csala.
|
||||
- Added model setting validation to ignore `--reasoning-effort` and `--thinking-tokens` if the model doesn't support them.
|
||||
- Added `--check-model-accepts-settings` flag (default: true) to force unsupported model settings.
|
||||
- Annotated which models support reasoning_effort and thinking_tokens settings in the model settings data.
|
||||
- Improved code block rendering in markdown output with better padding using NoInsetMarkdown.
|
||||
- Added `--git-commit-verify` flag (default: False) to control whether git commit hooks are bypassed.
|
||||
- Fixed autocompletion for `/ask`, `/code`, and `/architect` commands, by shladnik.
|
||||
- Added vi-like behavior when pressing enter in multiline-mode while in vi normal/navigation-mode, by Marco Mayer.
|
||||
- Added AWS_PROFILE support for Bedrock models, allowing use of AWS profiles instead of explicit credentials, by lentil32.
|
||||
- Enhanced `--aiderignore` argument to resolve both absolute and relative paths, by mopemope.
|
||||
- Improved platform information handling to gracefully handle retrieval errors.
|
||||
- Aider wrote 92% of the code in this release.
|
||||
|
||||
### Aider v0.77.1
|
||||
|
||||
- Bumped dependencies to pickup litellm fix for Ollama.
|
||||
- Added support for `openrouter/google/gemma-3-27b-it` model.
|
||||
- Updated exclude patterns for help documentation.
|
||||
|
||||
### Aider v0.77.0
|
||||
|
||||
- Big upgrade in [programming languages supported](https://aider.chat/docs/languages.html) by adopting [tree-sitter-language-pack](https://github.com/Goldziher/tree-sitter-language-pack/).
|
||||
- 130 new languages with linter support.
|
||||
- 20 new languages with repo-map support.
|
||||
@@ -238,7 +444,7 @@ cog.out(text)
|
||||
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
|
||||
- New `--copy-paste` mode.
|
||||
- New `/copy-context` command.
|
||||
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- New `--api-key provider=key` setting.
|
||||
- New `--set-env VAR=value` setting.
|
||||
- Added bash and zsh support to `--watch-files`.
|
||||
@@ -406,7 +612,7 @@ cog.out(text)
|
||||
|
||||
### Aider v0.59.1
|
||||
|
||||
- Check for obsolete `yes: true` in yaml config, show helpful error.
|
||||
- Check for obsolete `yes: true` in YAML config, show helpful error.
|
||||
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta
|
||||
|
||||
### Aider v0.59.0
|
||||
@@ -416,7 +622,7 @@ cog.out(text)
|
||||
- Still auto-completes the full paths of the repo files like `/add`.
|
||||
- Now supports globs like `src/**/*.py`
|
||||
- Renamed `--yes` to `--yes-always`.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
|
||||
- Existing YAML and .env files will need to be updated.
|
||||
- Can still abbreviate to `--yes` on the command line.
|
||||
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.
|
||||
@@ -623,7 +829,7 @@ cog.out(text)
|
||||
- Use `--map-refresh <always|files|manual|auto>` to configure.
|
||||
- Improved cost estimate logic for caching.
|
||||
- Improved editing performance on Jupyter Notebook `.ipynb` files.
|
||||
- Show which config yaml file is loaded with `--verbose`.
|
||||
- Show which config YAML file is loaded with `--verbose`.
|
||||
- Bumped dependency versions.
|
||||
- Bugfix: properly load `.aider.models.metadata.json` data.
|
||||
- Bugfix: Using `--msg /ask ...` caused an exception.
|
||||
|
||||
@@ -32,7 +32,7 @@ aux_links:
|
||||
"GitHub":
|
||||
- "https://github.com/Aider-AI/aider"
|
||||
"Discord":
|
||||
- "https://discord.gg/Tv2uQnR88V"
|
||||
- "https://discord.gg/Y7X7bhMQFV"
|
||||
"Blog":
|
||||
- "/blog/"
|
||||
|
||||
@@ -40,7 +40,7 @@ nav_external_links:
|
||||
- title: "GitHub"
|
||||
url: "https://github.com/Aider-AI/aider"
|
||||
- title: "Discord"
|
||||
url: "https://discord.gg/Tv2uQnR88V"
|
||||
url: "https://discord.gg/Y7X7bhMQFV"
|
||||
|
||||
repository: Aider-AI/aider
|
||||
|
||||
@@ -51,4 +51,19 @@ callouts:
|
||||
note:
|
||||
title: Note
|
||||
color: yellow
|
||||
|
||||
# Custom CSS for our table of contents
|
||||
kramdown:
|
||||
syntax_highlighter_opts:
|
||||
css_class: highlight
|
||||
|
||||
sass:
|
||||
style: compressed
|
||||
|
||||
# Additional CSS
|
||||
compress_html:
|
||||
clippings: all
|
||||
comments: all
|
||||
endings: all
|
||||
startings: []
|
||||
|
||||
|
||||
@@ -4017,3 +4017,645 @@
|
||||
gmoz22: 4
|
||||
start_tag: v0.75.0
|
||||
total_lines: 1875
|
||||
- aider_percentage: 71.93
|
||||
aider_total: 1399
|
||||
end_date: '2025-03-13'
|
||||
end_tag: v0.77.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args.py:
|
||||
Paul Gauthier (aider): 5
|
||||
aider/coders/architect_coder.py:
|
||||
Paul Gauthier (aider): 2
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier (aider): 14
|
||||
aider/commands.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 71
|
||||
aider/deprecated.py:
|
||||
Paul Gauthier: 2
|
||||
aider/io.py:
|
||||
Paul Gauthier (aider): 5
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 12
|
||||
aider/models.py:
|
||||
Paul Gauthier (aider): 83
|
||||
aider/queries/tree-sitter-language-pack/arduino-tags.scm:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 2
|
||||
aider/queries/tree-sitter-language-pack/c-tags.scm:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 5
|
||||
aider/queries/tree-sitter-language-pack/chatito-tags.scm:
|
||||
Paul Gauthier: 11
|
||||
Paul Gauthier (aider): 5
|
||||
aider/queries/tree-sitter-language-pack/commonlisp-tags.scm:
|
||||
Paul Gauthier: 116
|
||||
Paul Gauthier (aider): 6
|
||||
aider/queries/tree-sitter-language-pack/cpp-tags.scm:
|
||||
Paul Gauthier: 7
|
||||
Paul Gauthier (aider): 8
|
||||
aider/queries/tree-sitter-language-pack/d-tags.scm:
|
||||
Paul Gauthier: 9
|
||||
Paul Gauthier (aider): 17
|
||||
aider/queries/tree-sitter-language-pack/dart-tags.scm:
|
||||
Paul Gauthier: 42
|
||||
Paul Gauthier (aider): 19
|
||||
aider/queries/tree-sitter-language-pack/elisp-tags.scm:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 2
|
||||
aider/queries/tree-sitter-language-pack/elixir-tags.scm:
|
||||
Paul Gauthier: 10
|
||||
Paul Gauthier (aider): 8
|
||||
aider/queries/tree-sitter-language-pack/elm-tags.scm:
|
||||
Paul Gauthier: 8
|
||||
Paul Gauthier (aider): 11
|
||||
aider/queries/tree-sitter-language-pack/gleam-tags.scm:
|
||||
Paul Gauthier: 26
|
||||
Paul Gauthier (aider): 15
|
||||
aider/queries/tree-sitter-language-pack/go-tags.scm:
|
||||
Paul Gauthier: 14
|
||||
Paul Gauthier (aider): 14
|
||||
aider/queries/tree-sitter-language-pack/java-tags.scm:
|
||||
Paul Gauthier: 10
|
||||
Paul Gauthier (aider): 7
|
||||
aider/queries/tree-sitter-language-pack/lua-tags.scm:
|
||||
Paul Gauthier: 25
|
||||
Paul Gauthier (aider): 9
|
||||
aider/queries/tree-sitter-language-pack/pony-tags.scm:
|
||||
Paul Gauthier: 20
|
||||
Paul Gauthier (aider): 19
|
||||
aider/queries/tree-sitter-language-pack/properties-tags.scm:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 2
|
||||
aider/queries/tree-sitter-language-pack/python-tags.scm:
|
||||
Paul Gauthier: 9
|
||||
Paul Gauthier (aider): 5
|
||||
aider/queries/tree-sitter-language-pack/r-tags.scm:
|
||||
Paul Gauthier: 17
|
||||
Paul Gauthier (aider): 4
|
||||
aider/queries/tree-sitter-language-pack/racket-tags.scm:
|
||||
Paul Gauthier: 10
|
||||
Paul Gauthier (aider): 2
|
||||
aider/queries/tree-sitter-language-pack/ruby-tags.scm:
|
||||
Paul Gauthier: 23
|
||||
Paul Gauthier (aider): 12
|
||||
aider/queries/tree-sitter-language-pack/rust-tags.scm:
|
||||
Paul Gauthier: 41
|
||||
Paul Gauthier (aider): 14
|
||||
aider/queries/tree-sitter-language-pack/solidity-tags.scm:
|
||||
Paul Gauthier: 30
|
||||
Paul Gauthier (aider): 13
|
||||
aider/queries/tree-sitter-language-pack/swift-tags.scm:
|
||||
Paul Gauthier: 39
|
||||
Paul Gauthier (aider): 12
|
||||
aider/queries/tree-sitter-language-pack/udev-tags.scm:
|
||||
Paul Gauthier: 15
|
||||
Paul Gauthier (aider): 5
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 9
|
||||
aider/watch.py:
|
||||
Yutaka Matsubara: 4
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 8
|
||||
scripts/redact-cast.py:
|
||||
Paul Gauthier: 27
|
||||
Paul Gauthier (aider): 98
|
||||
scripts/tsl_pack_langs.py:
|
||||
Paul Gauthier (aider): 145
|
||||
scripts/versionbump.py:
|
||||
Paul Gauthier (aider): 1
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier (aider): 104
|
||||
tests/basic/test_commands.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 190
|
||||
tests/basic/test_models.py:
|
||||
Paul Gauthier (aider): 44
|
||||
tests/basic/test_repomap.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 125
|
||||
tests/fixtures/languages/arduino/test.ino:
|
||||
Paul Gauthier (aider): 21
|
||||
tests/fixtures/languages/c/test.c:
|
||||
Paul Gauthier (aider): 12
|
||||
tests/fixtures/languages/chatito/test.chatito:
|
||||
Paul Gauthier (aider): 20
|
||||
tests/fixtures/languages/commonlisp/test.lisp:
|
||||
Paul Gauthier (aider): 17
|
||||
tests/fixtures/languages/d/test.d:
|
||||
Paul Gauthier (aider): 26
|
||||
tests/fixtures/languages/dart/test.dart:
|
||||
Paul Gauthier (aider): 21
|
||||
tests/fixtures/languages/elm/test.elm:
|
||||
Paul Gauthier (aider): 16
|
||||
tests/fixtures/languages/gleam/test.gleam:
|
||||
Paul Gauthier (aider): 10
|
||||
tests/fixtures/languages/lua/test.lua:
|
||||
Paul Gauthier (aider): 25
|
||||
tests/fixtures/languages/pony/test.pony:
|
||||
Paul Gauthier (aider): 8
|
||||
tests/fixtures/languages/properties/test.properties:
|
||||
Paul Gauthier (aider): 14
|
||||
tests/fixtures/languages/r/test.r:
|
||||
Paul Gauthier (aider): 17
|
||||
tests/fixtures/languages/racket/test.rkt:
|
||||
Paul Gauthier (aider): 8
|
||||
tests/fixtures/languages/solidity/test.sol:
|
||||
Paul Gauthier (aider): 21
|
||||
tests/fixtures/languages/swift/test.swift:
|
||||
Paul Gauthier (aider): 18
|
||||
tests/fixtures/languages/udev/test.rules:
|
||||
Paul Gauthier (aider): 22
|
||||
grand_total:
|
||||
Paul Gauthier: 542
|
||||
Paul Gauthier (aider): 1399
|
||||
Yutaka Matsubara: 4
|
||||
start_tag: v0.76.0
|
||||
total_lines: 1945
|
||||
- aider_percentage: 91.82
|
||||
aider_total: 2682
|
||||
end_date: '2025-03-21'
|
||||
end_tag: v0.78.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args.py:
|
||||
Paul Gauthier (aider): 24
|
||||
Yutaka Matsubara: 2
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 6
|
||||
aider/commands.py:
|
||||
Carles Sala (aider): 30
|
||||
Paul Gauthier (aider): 10
|
||||
aider/help_pats.py:
|
||||
Paul Gauthier: 6
|
||||
aider/io.py:
|
||||
Marco Mayer: 2
|
||||
Paul Gauthier (aider): 17
|
||||
aider/main.py:
|
||||
Paul Gauthier: 5
|
||||
Paul Gauthier (aider): 29
|
||||
aider/mdstream.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 22
|
||||
aider/models.py:
|
||||
Paul Gauthier (aider): 41
|
||||
lentil32 (aider): 15
|
||||
aider/repo.py:
|
||||
Paul Gauthier (aider): 5
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 22
|
||||
aider/website/_includes/head_custom.html:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 53
|
||||
aider/website/_includes/recording.js:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 424
|
||||
aider/website/assets/asciinema/asciinema-player.min.js:
|
||||
Paul Gauthier: 1
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 173
|
||||
Paul Gauthier (aider): 371
|
||||
scripts/badges.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 496
|
||||
scripts/blame.py:
|
||||
Paul Gauthier: 2
|
||||
scripts/jekyll_run.sh:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 5
|
||||
scripts/logo_svg.py:
|
||||
Paul Gauthier: 5
|
||||
Paul Gauthier (aider): 169
|
||||
scripts/recording_audio.py:
|
||||
Paul Gauthier (aider): 338
|
||||
scripts/redact-cast.py:
|
||||
Paul Gauthier: 22
|
||||
Paul Gauthier (aider): 37
|
||||
scripts/tmux_record.sh:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 17
|
||||
scripts/update-docs.sh:
|
||||
Paul Gauthier: 1
|
||||
scripts/update-history.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 52
|
||||
tests/basic/test_aws_credentials.py:
|
||||
lentil32 (aider): 169
|
||||
tests/basic/test_commands.py:
|
||||
Carles Sala (aider): 40
|
||||
tests/basic/test_main.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 193
|
||||
tests/basic/test_repo.py:
|
||||
Paul Gauthier (aider): 48
|
||||
tests/help/test_help.py:
|
||||
Paul Gauthier (aider): 49
|
||||
grand_total:
|
||||
Carles Sala (aider): 70
|
||||
Marco Mayer: 2
|
||||
Paul Gauthier: 235
|
||||
Paul Gauthier (aider): 2428
|
||||
Yutaka Matsubara: 2
|
||||
lentil32 (aider): 184
|
||||
start_tag: v0.77.0
|
||||
total_lines: 2921
|
||||
- aider_percentage: 65.38
|
||||
aider_total: 221
|
||||
end_date: '2025-03-25'
|
||||
end_tag: v0.79.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/coders/__init__.py:
|
||||
Paul Gauthier: 2
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 15
|
||||
Paul Gauthier (aider): 5
|
||||
aider/coders/context_coder.py:
|
||||
Paul Gauthier: 45
|
||||
Paul Gauthier (aider): 8
|
||||
aider/commands.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 20
|
||||
aider/io.py:
|
||||
Paul Gauthier: 11
|
||||
Paul Gauthier (aider): 2
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/models.py:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 1
|
||||
aider/repomap.py:
|
||||
Paul Gauthier: 17
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 13
|
||||
Paul Gauthier (aider): 10
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 16
|
||||
scripts/badges.py:
|
||||
Paul Gauthier (aider): 2
|
||||
scripts/blame.py:
|
||||
Paul Gauthier (aider): 16
|
||||
scripts/dl_icons.py:
|
||||
Paul Gauthier (aider): 60
|
||||
scripts/tmux_record.sh:
|
||||
Paul Gauthier: 1
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 77
|
||||
grand_total:
|
||||
Paul Gauthier: 117
|
||||
Paul Gauthier (aider): 221
|
||||
start_tag: v0.78.0
|
||||
total_lines: 338
|
||||
- aider_percentage: 86.86
|
||||
aider_total: 1837
|
||||
end_date: '2025-03-31'
|
||||
end_tag: v0.80.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 2
|
||||
aider/commands.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 20
|
||||
aider/exceptions.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 3
|
||||
aider/io.py:
|
||||
Andrey Ivanov: 2
|
||||
Matteo Landi (aider): 11
|
||||
Paul Gauthier (aider): 38
|
||||
aider/linter.py:
|
||||
Mir Adnan ALI: 2
|
||||
aider/main.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 21
|
||||
aider/mdstream.py:
|
||||
Peter Schilling (aider) (aider): 25
|
||||
aider/models.py:
|
||||
Paul Gauthier: 12
|
||||
Paul Gauthier (aider): 9
|
||||
aider/onboarding.py:
|
||||
Paul Gauthier: 44
|
||||
Paul Gauthier (aider): 389
|
||||
aider/queries/tree-sitter-languages/scala-tags.scm:
|
||||
Vasil Markoukin: 65
|
||||
aider/repo.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 7
|
||||
aider/repomap.py:
|
||||
Paul Gauthier (aider): 19
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier (aider): 13
|
||||
aider/scrape.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 1
|
||||
aider/utils.py:
|
||||
Paul Gauthier (aider): 5
|
||||
aider/watch.py:
|
||||
Matteo Landi (aider): 2
|
||||
aider/website/_includes/leaderboard.js:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 2
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 51
|
||||
Paul Gauthier (aider): 175
|
||||
scripts/30k-image.py:
|
||||
Paul Gauthier: 8
|
||||
Paul Gauthier (aider): 227
|
||||
scripts/homepage.py:
|
||||
Paul Gauthier (aider): 122
|
||||
tests/basic/test_commands.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 48
|
||||
tests/basic/test_exceptions.py:
|
||||
Paul Gauthier (aider): 17
|
||||
tests/basic/test_io.py:
|
||||
Paul Gauthier (aider): 28
|
||||
tests/basic/test_main.py:
|
||||
Paul Gauthier: 15
|
||||
Paul Gauthier (aider): 199
|
||||
tests/basic/test_onboarding.py:
|
||||
Paul Gauthier (aider): 439
|
||||
tests/basic/test_repomap.py:
|
||||
Vasil Markoukin: 3
|
||||
tests/basic/test_ssl_verification.py:
|
||||
Paul Gauthier (aider): 8
|
||||
tests/basic/test_watch.py:
|
||||
Matteo Landi (aider): 9
|
||||
tests/fixtures/languages/scala/test.scala:
|
||||
Vasil Markoukin: 61
|
||||
grand_total:
|
||||
Andrey Ivanov: 2
|
||||
Matteo Landi (aider): 22
|
||||
Mir Adnan ALI: 2
|
||||
Paul Gauthier: 145
|
||||
Paul Gauthier (aider): 1790
|
||||
Peter Schilling (aider) (aider): 25
|
||||
Vasil Markoukin: 129
|
||||
start_tag: v0.79.0
|
||||
total_lines: 2115
|
||||
- aider_percentage: 85.55
|
||||
aider_total: 225
|
||||
end_date: '2025-04-04'
|
||||
end_tag: v0.81.0
|
||||
file_counts:
|
||||
.github/workflows/check_pypi_version.yml:
|
||||
Paul Gauthier: 11
|
||||
Paul Gauthier (aider): 75
|
||||
.github/workflows/windows_check_pypi_version.yml:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 86
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/exceptions.py:
|
||||
Paul Gauthier: 6
|
||||
Paul Gauthier (aider): 12
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 40
|
||||
aider/models.py:
|
||||
Paul Gauthier (aider): 2
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 9
|
||||
Paul Gauthier (aider): 1
|
||||
aider/website/_includes/leaderboard.js:
|
||||
Paul Gauthier (aider): 5
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 3
|
||||
tests/basic/test_exceptions.py:
|
||||
Paul Gauthier: 3
|
||||
grand_total:
|
||||
Paul Gauthier: 38
|
||||
Paul Gauthier (aider): 225
|
||||
start_tag: v0.80.0
|
||||
total_lines: 263
|
||||
- aider_percentage: 91.85
|
||||
aider_total: 1567
|
||||
end_date: '2025-04-14'
|
||||
end_tag: v0.82.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args_formatter.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/coders/__init__.py:
|
||||
Paul Gauthier (aider): 4
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 5
|
||||
aider/coders/editor_diff_fenced_coder.py:
|
||||
Paul Gauthier (aider): 9
|
||||
aider/coders/patch_coder.py:
|
||||
Paul Gauthier (aider): 679
|
||||
aider/coders/search_replace.py:
|
||||
Paul Gauthier (aider): 1
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 1
|
||||
aider/models.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 25
|
||||
aider/resources/model-settings.yml:
|
||||
Felix Lisczyk: 13
|
||||
Paul Gauthier: 37
|
||||
Paul Gauthier (aider): 68
|
||||
aider/website/_includes/leaderboard.js:
|
||||
Paul Gauthier: 38
|
||||
Paul Gauthier (aider): 6
|
||||
aider/website/_includes/leaderboard_table.js:
|
||||
Paul Gauthier (aider): 518
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 15
|
||||
Paul Gauthier (aider): 209
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 28
|
||||
scripts/homepage.py:
|
||||
Paul Gauthier (aider): 2
|
||||
scripts/versionbump.py:
|
||||
Paul Gauthier (aider): 11
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 25
|
||||
grand_total:
|
||||
Felix Lisczyk: 13
|
||||
Paul Gauthier: 126
|
||||
Paul Gauthier (aider): 1567
|
||||
start_tag: v0.81.0
|
||||
total_lines: 1706
|
||||
- aider_percentage: 54.32
|
||||
aider_total: 1409
|
||||
end_date: '2025-05-09'
|
||||
end_tag: v0.83.0
|
||||
file_counts:
|
||||
.github/workflows/check_pypi_version.yml:
|
||||
Paul Gauthier (aider): 1
|
||||
.github/workflows/pre-commit.yml:
|
||||
MDW: 48
|
||||
.github/workflows/ubuntu-tests.yml:
|
||||
Paul Gauthier (aider): 1
|
||||
.github/workflows/windows-tests.yml:
|
||||
Paul Gauthier (aider): 1
|
||||
.github/workflows/windows_check_pypi_version.yml:
|
||||
Paul Gauthier (aider): 1
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args.py:
|
||||
Andrew Grigorev: 21
|
||||
Andrew Grigorev (aider): 5
|
||||
Paul Gauthier (aider): 38
|
||||
aider/coders/__init__.py:
|
||||
Paul Gauthier (aider): 2
|
||||
aider/coders/base_coder.py:
|
||||
Andrew Grigorev (aider): 2
|
||||
Paul Gauthier: 60
|
||||
Paul Gauthier (aider): 104
|
||||
aider/coders/editblock_coder.py:
|
||||
Paul Gauthier: 10
|
||||
Paul Gauthier (aider): 7
|
||||
zjy1412: 2
|
||||
aider/coders/editblock_fenced_coder.py:
|
||||
MDW: 1
|
||||
aider/coders/help_coder.py:
|
||||
MDW: 1
|
||||
aider/coders/patch_coder.py:
|
||||
Paul Gauthier (aider): 38
|
||||
aider/coders/shell.py:
|
||||
Paul Gauthier: 37
|
||||
aider/coders/udiff_coder.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 9
|
||||
aider/coders/udiff_simple.py:
|
||||
Paul Gauthier (aider): 14
|
||||
aider/commands.py:
|
||||
Andrew Grigorev: 10
|
||||
Paul Gauthier: 7
|
||||
Paul Gauthier (aider): 1
|
||||
aider/gui.py:
|
||||
Jon Keys: 2
|
||||
aider/io.py:
|
||||
Kay Gosho: 1
|
||||
Paul Gauthier (aider): 5
|
||||
aider/linter.py:
|
||||
Paul Gauthier: 1
|
||||
Titusz Pan: 1
|
||||
aider/main.py:
|
||||
Paul Gauthier (aider): 9
|
||||
aider/mdstream.py:
|
||||
Paul Gauthier (aider): 11
|
||||
aider/models.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 66
|
||||
Stefan Hladnik: 4
|
||||
Stefan Hladnik (aider): 41
|
||||
aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm:
|
||||
Andrey Popp: 98
|
||||
aider/queries/tree-sitter-languages/ocaml_interface-tags.scm:
|
||||
Andrey Popp: 98
|
||||
aider/repo.py:
|
||||
Andrew Grigorev: 115
|
||||
Andrew Grigorev (aider): 21
|
||||
Paul Gauthier: 6
|
||||
Paul Gauthier (aider): 33
|
||||
aider/repomap.py:
|
||||
Paul Gauthier: 5
|
||||
Paul Gauthier (aider): 6
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 183
|
||||
Paul Gauthier (aider): 175
|
||||
cantalupo555: 1
|
||||
aider/scrape.py:
|
||||
Jon Keys: 12
|
||||
aider/utils.py:
|
||||
Paul Gauthier: 13
|
||||
Paul Gauthier (aider): 131
|
||||
Titusz Pan: 1
|
||||
aider/waiting.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 54
|
||||
aider/watch.py:
|
||||
Paul Gauthier: 6
|
||||
Paul Gauthier (aider): 7
|
||||
aider/website/_includes/leaderboard_table.js:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 18
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 2
|
||||
aider/website/index.html:
|
||||
Paul Gauthier: 13
|
||||
benchmark/benchmark.py:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 42
|
||||
benchmark/docker.sh:
|
||||
Paul Gauthier: 2
|
||||
benchmark/refactor_tools.py:
|
||||
MDW: 1
|
||||
scripts/30k-image.py:
|
||||
MDW: 1
|
||||
scripts/clean_metadata.py:
|
||||
Paul Gauthier (aider): 258
|
||||
scripts/update-history.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 7
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier (aider): 3
|
||||
tests/basic/test_commands.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 90
|
||||
tests/basic/test_editblock.py:
|
||||
Paul Gauthier: 10
|
||||
zjy1412: 52
|
||||
tests/basic/test_io.py:
|
||||
Paul Gauthier (aider): 132
|
||||
tests/basic/test_linter.py:
|
||||
Paul Gauthier: 22
|
||||
Titusz Pan: 10
|
||||
tests/basic/test_repo.py:
|
||||
Andrew Grigorev: 75
|
||||
Andrew Grigorev (aider): 65
|
||||
Paul Gauthier: 79
|
||||
Paul Gauthier (aider): 6
|
||||
tests/basic/test_repomap.py:
|
||||
Andrey Popp: 7
|
||||
tests/basic/test_watch.py:
|
||||
MDW: 1
|
||||
tests/fixtures/languages/ocaml_interface/test.mli:
|
||||
Andrey Popp: 14
|
||||
tests/scrape/test_playwright_disable.py:
|
||||
Andrew Grigorev: 111
|
||||
Paul Gauthier: 25
|
||||
Paul Gauthier (aider): 3
|
||||
grand_total:
|
||||
Andrew Grigorev: 332
|
||||
Andrew Grigorev (aider): 93
|
||||
Andrey Popp: 217
|
||||
Jon Keys: 14
|
||||
Kay Gosho: 1
|
||||
MDW: 53
|
||||
Paul Gauthier: 497
|
||||
Paul Gauthier (aider): 1275
|
||||
Stefan Hladnik: 4
|
||||
Stefan Hladnik (aider): 41
|
||||
Titusz Pan: 12
|
||||
cantalupo555: 1
|
||||
zjy1412: 54
|
||||
start_tag: v0.82.0
|
||||
total_lines: 2594
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
- dirname: 2025-02-25-20-23-07--gemini-pro
|
||||
test_cases: 225
|
||||
model: gemini/gemini-2.0-pro-exp-02-05
|
||||
model: Gemini 2.0 Pro exp-02-05
|
||||
edit_format: whole
|
||||
commit_hash: 2fccd47
|
||||
pass_rate_1: 20.4
|
||||
@@ -338,7 +338,7 @@
|
||||
|
||||
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
|
||||
test_cases: 225
|
||||
model: DeepSeek Chat V3
|
||||
model: DeepSeek Chat V3 (prev)
|
||||
edit_format: diff
|
||||
commit_hash: 0a23c4a-dirty
|
||||
pass_rate_1: 22.7
|
||||
@@ -643,7 +643,7 @@
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: "aider --model anthropic/claude-3-7-sonnet-20250219 # plus yml config"
|
||||
command: "aider --model anthropic/claude-3-7-sonnet-20250219 --thinking-tokens 32k"
|
||||
date: 2025-02-24
|
||||
versions: 0.75.1.dev
|
||||
seconds_per_case: 105.2
|
||||
@@ -729,28 +729,581 @@
|
||||
seconds_per_case: 137.4
|
||||
total_cost: 0
|
||||
|
||||
- dirname: 2025-03-13-20-46-30--cmda-whole
|
||||
- dirname: 2025-03-14-23-40-00--cmda-quality-whole2
|
||||
test_cases: 225
|
||||
model: command-a-03-2025
|
||||
model: command-a-03-2025-quality
|
||||
edit_format: whole
|
||||
commit_hash: 024b913-dirty
|
||||
commit_hash: a1aa63f
|
||||
pass_rate_1: 2.2
|
||||
pass_rate_2: 4.9
|
||||
pass_rate_2: 12.0
|
||||
pass_num_1: 5
|
||||
pass_num_2: 27
|
||||
percent_cases_well_formed: 99.6
|
||||
error_outputs: 2
|
||||
num_malformed_responses: 1
|
||||
num_with_malformed_responses: 1
|
||||
user_asks: 215
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: OPENAI_API_BASE=https://api.cohere.ai/compatibility/v1 aider --model openai/command-a-03-2025-quality
|
||||
date: 2025-03-14
|
||||
versions: 0.77.1.dev
|
||||
seconds_per_case: 85.1
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-03-15-01-21-24--gemma3-27b-or
|
||||
test_cases: 225
|
||||
model: gemma-3-27b-it
|
||||
edit_format: whole
|
||||
commit_hash: fd21f51-dirty
|
||||
pass_rate_1: 1.8
|
||||
pass_rate_2: 4.9
|
||||
pass_num_1: 4
|
||||
pass_num_2: 11
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 38
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 231
|
||||
user_asks: 181
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 3
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/google/gemma-3-27b-it
|
||||
date: 2025-03-15
|
||||
versions: 0.77.1.dev
|
||||
seconds_per_case: 79.7
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-03-24-15-41-33--deepseek-v3-0324-polyglot-diff
|
||||
test_cases: 225
|
||||
model: DeepSeek V3 (0324)
|
||||
edit_format: diff
|
||||
commit_hash: 502b863
|
||||
pass_rate_1: 28.0
|
||||
pass_rate_2: 55.1
|
||||
pass_num_1: 63
|
||||
pass_num_2: 124
|
||||
percent_cases_well_formed: 99.6
|
||||
error_outputs: 32
|
||||
num_malformed_responses: 1
|
||||
num_with_malformed_responses: 1
|
||||
user_asks: 96
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 2
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model deepseek/deepseek-chat
|
||||
date: 2025-03-24
|
||||
versions: 0.78.1.dev
|
||||
seconds_per_case: 290.0
|
||||
total_cost: 1.1164
|
||||
|
||||
- dirname: 2025-04-12-04-55-50--gemini-25-pro-diff-fenced
|
||||
test_cases: 225
|
||||
model: Gemini 2.5 Pro Preview 03-25
|
||||
edit_format: diff-fenced
|
||||
commit_hash: 0282574
|
||||
pass_rate_1: 40.9
|
||||
pass_rate_2: 72.9
|
||||
pass_num_1: 92
|
||||
pass_num_2: 164
|
||||
percent_cases_well_formed: 92.4
|
||||
error_outputs: 21
|
||||
num_malformed_responses: 21
|
||||
num_with_malformed_responses: 17
|
||||
user_asks: 69
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model cohere_chat/command-a-03-2025
|
||||
date: 2025-03-13
|
||||
versions: 0.76.3.dev
|
||||
seconds_per_case: 106.3
|
||||
total_cost: 0.0000
|
||||
command: aider --model gemini/gemini-2.5-pro-preview-03-25
|
||||
date: 2025-04-12
|
||||
versions: 0.81.3.dev
|
||||
seconds_per_case: 45.3
|
||||
total_cost: 0 # incorrect: 6.3174
|
||||
|
||||
- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
|
||||
test_cases: 225
|
||||
model: chatgpt-4o-latest (2025-03-29)
|
||||
edit_format: diff
|
||||
commit_hash: 0decbad
|
||||
pass_rate_1: 16.4
|
||||
pass_rate_2: 45.3
|
||||
pass_num_1: 37
|
||||
pass_num_2: 102
|
||||
percent_cases_well_formed: 64.4
|
||||
error_outputs: 85
|
||||
num_malformed_responses: 85
|
||||
num_with_malformed_responses: 80
|
||||
user_asks: 174
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model chatgpt-4o-latest
|
||||
date: 2025-03-29
|
||||
versions: 0.79.3.dev
|
||||
seconds_per_case: 10.3
|
||||
total_cost: 19.7416
|
||||
|
||||
- dirname: 2025-04-04-02-57-25--qalpha-diff-exsys
|
||||
test_cases: 225
|
||||
model: Quasar Alpha
|
||||
edit_format: diff
|
||||
commit_hash: 8a34a6c-dirty
|
||||
pass_rate_1: 21.8
|
||||
pass_rate_2: 54.7
|
||||
pass_num_1: 49
|
||||
pass_num_2: 123
|
||||
percent_cases_well_formed: 98.2
|
||||
error_outputs: 4
|
||||
num_malformed_responses: 4
|
||||
num_with_malformed_responses: 4
|
||||
user_asks: 187
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/openrouter/quasar-alpha
|
||||
date: 2025-04-04
|
||||
versions: 0.80.5.dev
|
||||
seconds_per_case: 14.8
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-06-08-39-52--llama-4-maverick-17b-128e-instruct-polyglot-whole
|
||||
test_cases: 225
|
||||
model: Llama 4 Maverick
|
||||
edit_format: whole
|
||||
commit_hash: 9445a31
|
||||
pass_rate_1: 4.4
|
||||
pass_rate_2: 15.6
|
||||
pass_num_1: 10
|
||||
pass_num_2: 35
|
||||
percent_cases_well_formed: 99.1
|
||||
error_outputs: 12
|
||||
num_malformed_responses: 2
|
||||
num_with_malformed_responses: 2
|
||||
user_asks: 248
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model nvidia_nim/meta/llama-4-maverick-17b-128e-instruct
|
||||
date: 2025-04-06
|
||||
versions: 0.81.2.dev
|
||||
seconds_per_case: 20.5
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-10-04-21-31--grok3-diff-exuser
|
||||
test_cases: 225
|
||||
model: Grok 3 Beta
|
||||
edit_format: diff
|
||||
commit_hash: 2dd40fc-dirty
|
||||
pass_rate_1: 22.2
|
||||
pass_rate_2: 53.3
|
||||
pass_num_1: 50
|
||||
pass_num_2: 120
|
||||
percent_cases_well_formed: 99.6
|
||||
error_outputs: 1
|
||||
num_malformed_responses: 1
|
||||
num_with_malformed_responses: 1
|
||||
user_asks: 68
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/x-ai/grok-3-beta
|
||||
date: 2025-04-10
|
||||
versions: 0.81.2.dev
|
||||
seconds_per_case: 15.3
|
||||
total_cost: 11.0338
|
||||
|
||||
- dirname: 2025-04-10-18-47-24--grok3-mini-whole-exuser
|
||||
test_cases: 225
|
||||
model: Grok 3 Mini Beta (low)
|
||||
edit_format: whole
|
||||
commit_hash: 14ffe77-dirty
|
||||
pass_rate_1: 11.1
|
||||
pass_rate_2: 34.7
|
||||
pass_num_1: 25
|
||||
pass_num_2: 78
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 73
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/x-ai/grok-3-mini-beta
|
||||
date: 2025-04-10
|
||||
versions: 0.81.2.dev
|
||||
seconds_per_case: 35.1
|
||||
total_cost: 0.7856
|
||||
|
||||
- dirname: 2025-04-10-23-59-02--xai-grok3-mini-whole-high
|
||||
test_cases: 225
|
||||
model: Grok 3 Mini Beta (high)
|
||||
edit_format: whole
|
||||
commit_hash: 8ee33da-dirty
|
||||
pass_rate_1: 17.3
|
||||
pass_rate_2: 49.3
|
||||
pass_num_1: 39
|
||||
pass_num_2: 111
|
||||
percent_cases_well_formed: 99.6
|
||||
error_outputs: 1
|
||||
num_malformed_responses: 1
|
||||
num_with_malformed_responses: 1
|
||||
user_asks: 64
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model xai/grok-3-mini-beta --reasoning-effort high
|
||||
date: 2025-04-10
|
||||
versions: 0.81.3.dev
|
||||
seconds_per_case: 79.1
|
||||
total_cost: 0.7346
|
||||
|
||||
- dirname: 2025-04-10-19-02-44--oalpha-diff-exsys
|
||||
test_cases: 225
|
||||
model: Optimus Alpha
|
||||
edit_format: diff
|
||||
commit_hash: 532bc45-dirty
|
||||
pass_rate_1: 21.3
|
||||
pass_rate_2: 52.9
|
||||
pass_num_1: 48
|
||||
pass_num_2: 119
|
||||
percent_cases_well_formed: 97.3
|
||||
error_outputs: 7
|
||||
num_malformed_responses: 6
|
||||
num_with_malformed_responses: 6
|
||||
user_asks: 182
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 3
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/openrouter/optimus-alpha
|
||||
date: 2025-04-10
|
||||
versions: 0.81.2.dev
|
||||
seconds_per_case: 18.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-14-21-05-54--gpt41-diff-exuser
|
||||
test_cases: 225
|
||||
model: gpt-4.1
|
||||
edit_format: diff
|
||||
commit_hash: 7a87db5-dirty
|
||||
pass_rate_1: 20.0
|
||||
pass_rate_2: 52.4
|
||||
pass_num_1: 45
|
||||
pass_num_2: 118
|
||||
percent_cases_well_formed: 98.2
|
||||
error_outputs: 6
|
||||
num_malformed_responses: 5
|
||||
num_with_malformed_responses: 4
|
||||
user_asks: 171
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 20.5
|
||||
total_cost: 9.8556
|
||||
|
||||
- dirname: 2025-04-14-21-27-53--gpt41mini-diff
|
||||
test_cases: 225
|
||||
model: gpt-4.1-mini
|
||||
edit_format: diff
|
||||
commit_hash: ffb743e-dirty
|
||||
pass_rate_1: 11.1
|
||||
pass_rate_2: 32.4
|
||||
pass_num_1: 25
|
||||
pass_num_2: 73
|
||||
percent_cases_well_formed: 92.4
|
||||
error_outputs: 64
|
||||
num_malformed_responses: 62
|
||||
num_with_malformed_responses: 17
|
||||
user_asks: 159
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 2
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1-mini
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 19.5
|
||||
total_cost: 1.9918
|
||||
|
||||
- dirname: 2025-04-14-22-46-01--gpt41nano-diff
|
||||
test_cases: 225
|
||||
model: gpt-4.1-nano
|
||||
edit_format: whole
|
||||
commit_hash: 71d1591-dirty
|
||||
pass_rate_1: 3.1
|
||||
pass_rate_2: 8.9
|
||||
pass_num_1: 7
|
||||
pass_num_2: 20
|
||||
percent_cases_well_formed: 94.2
|
||||
error_outputs: 20
|
||||
num_malformed_responses: 20
|
||||
num_with_malformed_responses: 13
|
||||
user_asks: 316
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 8
|
||||
total_tests: 225
|
||||
command: aider --model gpt-4.1-nano
|
||||
date: 2025-04-14
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 12.0
|
||||
total_cost: 0.4281
|
||||
|
||||
- dirname: 2025-04-16-21-20-55--o3-high-diff-temp0-exsys
|
||||
test_cases: 225
|
||||
model: o3 (high)
|
||||
edit_format: diff
|
||||
commit_hash: 24805ff-dirty
|
||||
pass_rate_1: 36.9
|
||||
pass_rate_2: 79.6
|
||||
pass_num_1: 83
|
||||
pass_num_2: 179
|
||||
percent_cases_well_formed: 95.1
|
||||
error_outputs: 11
|
||||
num_malformed_responses: 11
|
||||
num_with_malformed_responses: 11
|
||||
user_asks: 110
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model o3
|
||||
date: 2025-04-16
|
||||
versions: 0.82.1.dev
|
||||
seconds_per_case: 113.8
|
||||
total_cost: 111.0325
|
||||
|
||||
- dirname: 2025-04-16-22-01-58--o4-mini-high-diff-exsys
|
||||
test_cases: 225
|
||||
model: o4-mini (high)
|
||||
edit_format: diff
|
||||
commit_hash: b66901f-dirty
|
||||
pass_rate_1: 19.6
|
||||
pass_rate_2: 72.0
|
||||
pass_num_1: 44
|
||||
pass_num_2: 162
|
||||
percent_cases_well_formed: 90.7
|
||||
error_outputs: 26
|
||||
num_malformed_responses: 24
|
||||
num_with_malformed_responses: 21
|
||||
user_asks: 66
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model o4-mini
|
||||
date: 2025-04-16
|
||||
versions: 0.82.1.dev
|
||||
seconds_per_case: 176.5
|
||||
total_cost: 19.6399
|
||||
|
||||
- dirname: 2025-04-17-01-20-35--o3-mini-high-diff-arch
|
||||
test_cases: 225
|
||||
model: o3 (high) + gpt-4.1
|
||||
edit_format: architect
|
||||
commit_hash: 80909e1-dirty
|
||||
editor_model: gpt-4.1
|
||||
editor_edit_format: editor-diff
|
||||
pass_rate_1: 36.0
|
||||
pass_rate_2: 82.7
|
||||
pass_num_1: 81
|
||||
pass_num_2: 186
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 9
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model o3 --architect
|
||||
date: 2025-04-17
|
||||
versions: 0.82.2.dev
|
||||
seconds_per_case: 110.0
|
||||
total_cost: 69.2921
|
||||
|
||||
- dirname: 2025-04-19-14-43-04--o4-mini-patch
|
||||
test_cases: 225
|
||||
model: openhands-lm-32b-v0.1
|
||||
edit_format: whole
|
||||
commit_hash: c08336f
|
||||
pass_rate_1: 4.0
|
||||
pass_rate_2: 10.2
|
||||
pass_num_1: 9
|
||||
pass_num_2: 23
|
||||
percent_cases_well_formed: 95.1
|
||||
error_outputs: 55
|
||||
num_malformed_responses: 41
|
||||
num_with_malformed_responses: 11
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 11
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/all-hands/openhands-lm-32b-v0.1
|
||||
date: 2025-04-19
|
||||
versions: 0.82.2.dev
|
||||
seconds_per_case: 195.6
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-20-19-54-31--flash25-diff-no-think
|
||||
test_cases: 225
|
||||
model: gemini-2.5-flash-preview-04-17 (default)
|
||||
edit_format: diff
|
||||
commit_hash: 7fcce5d-dirty
|
||||
pass_rate_1: 21.8
|
||||
pass_rate_2: 47.1
|
||||
pass_num_1: 49
|
||||
pass_num_2: 106
|
||||
percent_cases_well_formed: 85.3
|
||||
error_outputs: 60
|
||||
num_malformed_responses: 55
|
||||
num_with_malformed_responses: 33
|
||||
user_asks: 82
|
||||
lazy_comments: 1
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 5
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model gemini/gemini-2.5-flash-preview-04-17
|
||||
date: 2025-04-20
|
||||
versions: 0.82.3.dev
|
||||
seconds_per_case: 50.1
|
||||
total_cost: 1.8451
|
||||
|
||||
- dirname: 2025-05-07-19-32-40--gemini0506-diff-fenced-completion_cost
|
||||
test_cases: 225
|
||||
model: Gemini 2.5 Pro Preview 05-06
|
||||
edit_format: diff-fenced
|
||||
commit_hash: 3b08327-dirty
|
||||
pass_rate_1: 36.4
|
||||
pass_rate_2: 76.9
|
||||
pass_num_1: 82
|
||||
pass_num_2: 173
|
||||
percent_cases_well_formed: 97.3
|
||||
error_outputs: 15
|
||||
num_malformed_responses: 7
|
||||
num_with_malformed_responses: 6
|
||||
user_asks: 105
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model gemini/gemini-2.5-pro-preview-05-06
|
||||
date: 2025-05-07
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 165.3
|
||||
total_cost: 37.4104
|
||||
|
||||
- dirname: 2025-05-08-03-20-24--qwen3-32b-default
|
||||
test_cases: 225
|
||||
model: Qwen3 32B
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty, aeaf259
|
||||
pass_rate_1: 14.2
|
||||
pass_rate_2: 40.0
|
||||
pass_num_1: 32
|
||||
pass_num_2: 90
|
||||
percent_cases_well_formed: 83.6
|
||||
error_outputs: 119
|
||||
num_malformed_responses: 50
|
||||
num_with_malformed_responses: 37
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 12
|
||||
prompt_tokens: 317591
|
||||
completion_tokens: 120418
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-32b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 372.2
|
||||
total_cost: 0.7603
|
||||
|
||||
- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff, no think, Alibaba API
|
||||
edit_format: diff
|
||||
commit_hash: 91d7fbd-dirty
|
||||
pass_rate_1: 28.9
|
||||
pass_rate_2: 59.6
|
||||
pass_num_1: 65
|
||||
pass_num_2: 134
|
||||
percent_cases_well_formed: 92.9
|
||||
error_outputs: 22
|
||||
num_malformed_responses: 22
|
||||
num_with_malformed_responses: 16
|
||||
user_asks: 111
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 2816192
|
||||
completion_tokens: 342062
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openai/qwen3-235b-a22b
|
||||
date: 2025-05-09
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 45.4
|
||||
total_cost: 0.0000
|
||||
|
||||
272
aider/website/_data/qwen3_leaderboard.yml
Normal file
272
aider/website/_data/qwen3_leaderboard.yml
Normal file
@@ -0,0 +1,272 @@
|
||||
- dirname: 2025-05-08-03-20-24--qwen3-32b-default
|
||||
test_cases: 225
|
||||
model: Qwen3 32B diff on OpenRouter, all providers, default settings (thinking)
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty, aeaf259
|
||||
pass_rate_1: 14.2
|
||||
pass_rate_2: 40.0
|
||||
pass_num_1: 32
|
||||
pass_num_2: 90
|
||||
percent_cases_well_formed: 83.6
|
||||
error_outputs: 119
|
||||
num_malformed_responses: 50
|
||||
num_with_malformed_responses: 37
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 12
|
||||
prompt_tokens: 317591
|
||||
completion_tokens: 120418
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-32b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 372.2
|
||||
total_cost: 0.7603
|
||||
|
||||
- dirname: 2025-05-08-03-22-37--qwen3-235b-defaults
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff on OpenRouter, all providers, default settings (thinking)
|
||||
edit_format: diff
|
||||
commit_hash: aaacee5-dirty
|
||||
pass_rate_1: 17.3
|
||||
pass_rate_2: 49.8
|
||||
pass_num_1: 39
|
||||
pass_num_2: 112
|
||||
percent_cases_well_formed: 91.6
|
||||
error_outputs: 58
|
||||
num_malformed_responses: 29
|
||||
num_with_malformed_responses: 19
|
||||
user_asks: 102
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 0
|
||||
completion_tokens: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 428.1
|
||||
total_cost: 1.8037
|
||||
|
||||
|
||||
- dirname: 2025-05-08-17-39-14--qwen3-235b-or-together-only
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff on OpenRouter only TogetherAI, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 328584e
|
||||
pass_rate_1: 28.0
|
||||
pass_rate_2: 54.7
|
||||
pass_num_1: 63
|
||||
pass_num_2: 123
|
||||
percent_cases_well_formed: 90.7
|
||||
error_outputs: 39
|
||||
num_malformed_responses: 32
|
||||
num_with_malformed_responses: 21
|
||||
user_asks: 106
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 2816606
|
||||
completion_tokens: 362346
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
date: 2025-05-08
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 77.2
|
||||
total_cost: 0.6399
|
||||
|
||||
|
||||
- dirname: 2025-04-30-04-49-37--Qwen3-235B-A22B-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B whole with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 28.0
|
||||
pass_rate_2: 65.3
|
||||
pass_num_1: 63
|
||||
pass_num_2: 147
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 166
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 166.0
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-49-50--Qwen3-235B-A22B-diff-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B diff with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 29.8
|
||||
pass_rate_2: 61.3
|
||||
pass_num_1: 67
|
||||
pass_num_2: 138
|
||||
percent_cases_well_formed: 94.7
|
||||
error_outputs: 25
|
||||
num_malformed_responses: 25
|
||||
num_with_malformed_responses: 12
|
||||
user_asks: 97
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 158.2
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-08-41--Qwen3-32B-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-32B whole with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 20.4
|
||||
pass_rate_2: 45.8
|
||||
pass_num_1: 46
|
||||
pass_num_2: 103
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 94
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-32B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 48.1
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-04-30-04-08-51--Qwen3-32B-diff-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-32B diff with VLLM, bfloat16, recommended /no_think settings
|
||||
edit_format: diff
|
||||
commit_hash: 0c383df-dirty
|
||||
pass_rate_1: 20.4
|
||||
pass_rate_2: 41.3
|
||||
pass_num_1: 46
|
||||
pass_num_2: 93
|
||||
percent_cases_well_formed: 94.2
|
||||
error_outputs: 17
|
||||
num_malformed_responses: 14
|
||||
num_with_malformed_responses: 13
|
||||
user_asks: 83
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-32B
|
||||
date: 2025-04-30
|
||||
versions: 0.81.4.dev
|
||||
seconds_per_case: 59.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-05-07-03-15-59--Qwen3-235B-A22B-Q5_K_M-whole-nothink
|
||||
test_cases: 225
|
||||
model: Qwen3-235B-A22B whole with llama.cpp, Q5_K_M (unsloth), recommended /no_think settings
|
||||
edit_format: whole
|
||||
commit_hash: 8159cbf
|
||||
pass_rate_1: 27.1
|
||||
pass_rate_2: 59.1
|
||||
pass_num_1: 61
|
||||
pass_num_2: 133
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 1
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 169
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openai/Qwen3-235B-A22B-Q5_K_M
|
||||
date: 2025-05-07
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 635.2
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B diff, no think, via official Alibaba API
|
||||
edit_format: diff
|
||||
commit_hash: 91d7fbd-dirty
|
||||
pass_rate_1: 28.9
|
||||
pass_rate_2: 59.6
|
||||
pass_num_1: 65
|
||||
pass_num_2: 134
|
||||
percent_cases_well_formed: 92.9
|
||||
error_outputs: 22
|
||||
num_malformed_responses: 22
|
||||
num_with_malformed_responses: 16
|
||||
user_asks: 111
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 2816192
|
||||
completion_tokens: 342062
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openai/qwen3-235b-a22b
|
||||
date: 2025-05-09
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 45.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-05-09-23-01-22--qwen3-235b-a22b.unthink_16k_whole
|
||||
test_cases: 225
|
||||
model: Qwen3 235B A22B whole, no think, via official Alibaba API
|
||||
edit_format: whole
|
||||
commit_hash: 425fb6d
|
||||
pass_rate_1: 26.7
|
||||
pass_rate_2: 61.8
|
||||
pass_num_1: 60
|
||||
pass_num_2: 139
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 0
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 175
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
prompt_tokens: 2768173
|
||||
completion_tokens: 384000
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: aider --model openai/qwen3-235b-a22b
|
||||
date: 2025-05-09
|
||||
versions: 0.82.4.dev
|
||||
seconds_per_case: 50.8
|
||||
total_cost: 0.0000
|
||||
@@ -27,7 +27,7 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
labels: labels,
|
||||
datasets: [{
|
||||
label: 'Aider\'s percent of new code by release',
|
||||
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }} },{% endfor %}],
|
||||
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }}, end_date: '{{ row.end_date }}' },{% endfor %}],
|
||||
backgroundColor: 'rgba(54, 162, 235, 0.8)',
|
||||
borderColor: 'rgba(54, 162, 235, 1)',
|
||||
borderWidth: 1
|
||||
@@ -88,6 +88,10 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
var value = context.parsed.y || 0;
|
||||
var lines = context.raw.lines || 0;
|
||||
return `${label}: ${Math.round(value)}% (${lines} lines)`;
|
||||
},
|
||||
afterLabel: function(context) {
|
||||
let date = context.raw.end_date || 'n/a';
|
||||
return `Date: ` + date;
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,25 +1,22 @@
|
||||
|
||||
If you already have python 3.8-3.13 installed, you can get started quickly like this:
|
||||
If you already have python 3.8-3.13 installed, you can get started quickly like this.
|
||||
|
||||
First, install aider:
|
||||
|
||||
{% include install.md %}
|
||||
|
||||
Start working with aider on your codebase:
|
||||
|
||||
```bash
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
|
||||
# Change directory into your code base
|
||||
# Change directory into your codebase
|
||||
cd /to/your/project
|
||||
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
# DeepSeek
|
||||
aider --model deepseek --api-key deepseek=<key>
|
||||
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
# Claude 3.7 Sonnet
|
||||
aider --model sonnet --api-key anthropic=<key>
|
||||
|
||||
# Work with GPT-4o via OpenAI's API
|
||||
aider --model gpt-4o --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
# o3-mini
|
||||
aider --model o3-mini --api-key openai=<key>
|
||||
```
|
||||
|
||||
@@ -5,10 +5,66 @@
|
||||
<meta property="og:image" content="{{ site.url }}/assets/aider.jpg">
|
||||
<meta property="twitter:image" content="{{ site.url }}/assets/aider-square.jpg">
|
||||
{% endif %}
|
||||
|
||||
<!-- Custom site title styling -->
|
||||
<style>
|
||||
@font-face {
|
||||
font-family: GlassTTYVT220;
|
||||
src: local("Glass TTY VT220"), local("Glass TTY VT220 Medium"), url(/assets/Glass_TTY_VT220.ttf) format("truetype");
|
||||
}
|
||||
|
||||
.site-title {
|
||||
font-size: 1.8rem;
|
||||
font-weight: 700;
|
||||
font-family: 'GlassTTYVT220', monospace;
|
||||
color: #14b014; /* terminal green color */
|
||||
text-decoration: none;
|
||||
letter-spacing: 0.5px;
|
||||
}
|
||||
|
||||
/* For SVG logo inside site-title */
|
||||
.site-title img {
|
||||
height: 1.8rem;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
/* Sidebar gradient styling to match hero section */
|
||||
.side-bar {
|
||||
background: linear-gradient(135deg, #ffffff 0%, rgba(20, 176, 20, 0.01) 25%, rgba(20, 176, 20, 0.04) 40%, rgba(220, 230, 255, 0.4) 60%, rgba(205, 218, 255, 0.4) 80%, #F5F6FA 100%);
|
||||
}
|
||||
</style>
|
||||
<link rel="alternate" type="application/rss+xml" title="RSS Feed" href="{{ site.url }}/feed.xml">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com">
|
||||
<link rel="preload" href="https://fonts.googleapis.com/css?family=Open+Sans:400,700&display=swap" as="style" type="text/css" crossorigin>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<!-- Logo Progressive Enhancement for Jekyll pages -->
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
const siteTitle = document.querySelector('.site-title');
|
||||
if (siteTitle) {
|
||||
const textContent = siteTitle.textContent; // Save the text for fallback
|
||||
|
||||
// Create new image element
|
||||
const logoImg = new Image();
|
||||
logoImg.src = '/assets/logo.svg';
|
||||
logoImg.alt = 'Aider Logo';
|
||||
logoImg.style.height = '1.8rem';
|
||||
logoImg.style.verticalAlign = 'middle';
|
||||
|
||||
// Only replace if image loads successfully
|
||||
logoImg.onload = function() {
|
||||
siteTitle.textContent = ''; // Clear text
|
||||
siteTitle.appendChild(logoImg);
|
||||
};
|
||||
|
||||
// If image fails to load, do nothing (keep the text)
|
||||
logoImg.onerror = function() {
|
||||
console.log('SVG logo failed to load, keeping text fallback');
|
||||
};
|
||||
}
|
||||
});
|
||||
</script>
|
||||
<meta name="theme-color" content="#157878">
|
||||
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
|
||||
<link rel="icon" type="image/png" sizes="32x32" href="{{ '/assets/icons/favicon-32x32.png' | relative_url }}">
|
||||
|
||||
@@ -2,7 +2,7 @@ If you need more help, please check our
|
||||
[GitHub issues](https://github.com/Aider-AI/aider/issues)
|
||||
and file a new issue if your problem isn't discussed.
|
||||
Or drop into our
|
||||
[Discord](https://discord.gg/Tv2uQnR88V)
|
||||
[Discord](https://discord.gg/Y7X7bhMQFV)
|
||||
to chat with us.
|
||||
|
||||
When reporting problems, it is very helpful if you can provide:
|
||||
|
||||
5
aider/website/_includes/install.md
Normal file
5
aider/website/_includes/install.md
Normal file
@@ -0,0 +1,5 @@
|
||||
|
||||
```bash
|
||||
python -m pip install aider-install
|
||||
aider-install
|
||||
```
|
||||
@@ -4,7 +4,11 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
const redDiagonalPattern = pattern.draw('diagonal', 'rgba(255, 99, 132, 0.2)');
|
||||
let displayedData = [];
|
||||
|
||||
const HIGHLIGHT_MODEL = '{{ highlight_model | default: "no no no" }}';
|
||||
// Get highlight model from query string or Jekyll variable
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
const queryHighlight = urlParams.get('highlight');
|
||||
const HIGHLIGHT_MODEL = queryHighlight || '{{ highlight_model | default: "no no no" }}';
|
||||
|
||||
var leaderboardData = {
|
||||
labels: [],
|
||||
datasets: [{
|
||||
@@ -13,14 +17,14 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
backgroundColor: function(context) {
|
||||
const row = allData[context.dataIndex];
|
||||
if (row && row.edit_format === 'whole') {
|
||||
return diagonalPattern;
|
||||
return redDiagonalPattern; // Use red pattern for highlighted whole format
|
||||
}
|
||||
const label = leaderboardData.labels[context.dataIndex] || '';
|
||||
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
|
||||
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
|
||||
},
|
||||
borderColor: function(context) {
|
||||
const label = context.chart.data.labels[context.dataIndex] || '';
|
||||
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
|
||||
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
|
||||
},
|
||||
borderWidth: 1
|
||||
}, {
|
||||
@@ -74,11 +78,13 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
leaderboardChart.render();
|
||||
}
|
||||
|
||||
// Use displayedData in the backgroundColor callback instead of allData
|
||||
// Update backgroundColor and borderColor for the main dataset based on displayedData
|
||||
leaderboardData.datasets[0].backgroundColor = function(context) {
|
||||
const row = displayedData[context.dataIndex];
|
||||
const label = leaderboardData.labels[context.dataIndex] || '';
|
||||
if (label && label.includes(HIGHLIGHT_MODEL)) {
|
||||
const isHighlighted = label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase());
|
||||
|
||||
if (isHighlighted) {
|
||||
if (row && row.edit_format === 'whole') return redDiagonalPattern;
|
||||
else return 'rgba(255, 99, 132, 0.2)';
|
||||
} else if (row && row.edit_format === 'whole') {
|
||||
@@ -171,6 +177,9 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
},
|
||||
x: {
|
||||
ticks: {
|
||||
autoSkip: false, // Prevent labels from being automatically skipped
|
||||
maxRotation: 90, // Allow labels to rotate up to 90 degrees
|
||||
minRotation: 0,
|
||||
callback: function(value, index) {
|
||||
const label = this.getLabelForValue(value);
|
||||
if (label.length <= "claude-3-5-sonnet".length) {
|
||||
|
||||
520
aider/website/_includes/leaderboard_table.js
Normal file
520
aider/website/_includes/leaderboard_table.js
Normal file
@@ -0,0 +1,520 @@
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
let currentMode = 'view'; // 'view', 'select', 'detail'
|
||||
let selectedRows = new Set(); // Store indices of selected rows
|
||||
const MAX_DISPLAY_COST_CAP = 75; // Define the constant here
|
||||
|
||||
const allMainRows = document.querySelectorAll('tr[id^="main-row-"]');
|
||||
const allDetailsRows = document.querySelectorAll('tr[id^="details-"]');
|
||||
const searchInput = document.getElementById('editSearchInput');
|
||||
const modeViewButton = document.getElementById('mode-view-btn');
|
||||
const modeDetailButton = document.getElementById('mode-detail-btn');
|
||||
const modeSelectButton = document.getElementById('mode-select-btn');
|
||||
const modeButtons = [modeViewButton, modeSelectButton, modeDetailButton];
|
||||
const selectAllCheckbox = document.getElementById('select-all-checkbox');
|
||||
const leaderboardTitle = document.getElementById('leaderboard-title'); // Get title element
|
||||
const defaultTitle = "Aider polyglot coding leaderboard";
|
||||
const filteredTitle = "Aider polyglot coding benchmark results (selected)";
|
||||
|
||||
function applySearchFilter() {
|
||||
const searchTerm = searchInput.value.toLowerCase();
|
||||
allMainRows.forEach(row => {
|
||||
const textContent = row.textContent.toLowerCase();
|
||||
const detailsRow = document.getElementById(row.id.replace('main-row-', 'details-'));
|
||||
const matchesSearch = textContent.includes(searchTerm);
|
||||
|
||||
if (matchesSearch) {
|
||||
row.classList.remove('hidden-by-search');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-search');
|
||||
} else {
|
||||
row.classList.add('hidden-by-search');
|
||||
if (detailsRow) detailsRow.classList.add('hidden-by-search');
|
||||
}
|
||||
});
|
||||
// After applying search filter, re-apply view mode filter and update select-all state
|
||||
updateTableView(currentMode);
|
||||
if (currentMode === 'select') {
|
||||
updateSelectAllCheckboxState();
|
||||
}
|
||||
|
||||
// Update cost bars and ticks since visible rows may have changed
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
|
||||
function getVisibleMainRows() {
|
||||
// Helper to get rows currently visible (not hidden by search or mode)
|
||||
return Array.from(allMainRows).filter(row =>
|
||||
!row.classList.contains('hidden-by-search') && !row.classList.contains('hidden-by-mode')
|
||||
);
|
||||
}
|
||||
|
||||
function updateSelectAllCheckboxState() {
|
||||
// Update the header checkbox based on the selection state of *visible* rows
|
||||
if (currentMode !== 'select') return; // Only relevant in select mode
|
||||
|
||||
const visibleRows = getVisibleMainRows();
|
||||
const visibleRowCount = visibleRows.length;
|
||||
const selectedVisibleRowCount = visibleRows.filter(row => selectedRows.has(row.querySelector('.row-selector')?.dataset.rowIndex)).length;
|
||||
|
||||
if (visibleRowCount === 0) {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
} else if (selectedVisibleRowCount === visibleRowCount) {
|
||||
selectAllCheckbox.checked = true;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
} else if (selectedVisibleRowCount > 0) {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = true;
|
||||
} else {
|
||||
selectAllCheckbox.checked = false;
|
||||
selectAllCheckbox.indeterminate = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function updateTableView(mode) {
|
||||
currentMode = mode; // Update global state ('view', 'select', 'detail')
|
||||
|
||||
// Update button styles first
|
||||
modeButtons.forEach(btn => {
|
||||
btn.classList.remove('active');
|
||||
// Reset specific styles potentially added by .active
|
||||
btn.style.backgroundColor = '';
|
||||
btn.style.color = '';
|
||||
});
|
||||
let activeButton;
|
||||
if (mode === 'view') activeButton = modeViewButton;
|
||||
else if (mode === 'select') activeButton = modeSelectButton;
|
||||
else if (mode === 'detail') activeButton = modeDetailButton;
|
||||
|
||||
activeButton.classList.add('active');
|
||||
activeButton.style.backgroundColor = '#e7f3ff'; // Use selected row highlight blue
|
||||
activeButton.style.color = '#495057'; // Use dark text for contrast on light blue
|
||||
|
||||
// Get the first header cell (for the toggle/checkbox column)
|
||||
const firstHeaderCell = document.querySelector('table thead th:first-child');
|
||||
|
||||
// Show/hide header checkbox based on mode
|
||||
selectAllCheckbox.style.display = mode === 'select' ? 'inline-block' : 'none';
|
||||
|
||||
allMainRows.forEach(row => {
|
||||
const rowIndex = row.querySelector('.row-selector')?.dataset.rowIndex;
|
||||
const toggleButton = row.querySelector('.toggle-details');
|
||||
const selectorCheckbox = row.querySelector('.row-selector');
|
||||
const firstCell = row.querySelector('td:first-child'); // Get the first cell of the main row
|
||||
const detailsRow = document.getElementById(`details-${rowIndex}`);
|
||||
const isSelected = selectedRows.has(rowIndex);
|
||||
|
||||
// Reset visibility classes before applying mode logic
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
|
||||
// Show/hide the first column (header and data cells) based on mode
|
||||
if (firstHeaderCell) {
|
||||
firstHeaderCell.style.display = mode === 'view' ? 'none' : '';
|
||||
}
|
||||
if (firstCell) {
|
||||
firstCell.style.display = mode === 'view' ? 'none' : '';
|
||||
}
|
||||
|
||||
// Apply mode-specific logic
|
||||
if (mode === 'view') { // --- VIEW MODE ---
|
||||
toggleButton.style.display = 'none'; // Hide toggle in view mode
|
||||
selectorCheckbox.style.display = 'none';
|
||||
row.classList.remove('row-selected'); // Ensure no selection highlight
|
||||
// view-highlighted is handled by row click listener
|
||||
|
||||
// In 'view' mode, hide row if selections exist AND this row is NOT selected
|
||||
if (selectedRows.size > 0 && !isSelected) {
|
||||
row.classList.add('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.add('hidden-by-mode');
|
||||
} else {
|
||||
// Ensure row is not hidden by mode if it's selected or no selections exist
|
||||
// This is handled by the reset at the start of the loop:
|
||||
// row.classList.remove('hidden-by-mode');
|
||||
// if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
}
|
||||
// Always hide details row content in view mode regardless of visibility class
|
||||
if (detailsRow) {
|
||||
detailsRow.style.display = 'none';
|
||||
}
|
||||
|
||||
} else if (mode === 'select') { // --- SELECT MODE ---
|
||||
toggleButton.style.display = 'none';
|
||||
selectorCheckbox.style.display = 'inline-block';
|
||||
selectorCheckbox.checked = isSelected;
|
||||
row.classList.toggle('row-selected', isSelected);
|
||||
row.classList.remove('view-highlighted'); // Clear view highlight when switching to select
|
||||
// Always hide details row in select mode
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
|
||||
// In 'select' mode, no rows should be hidden based on selection status
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) detailsRow.classList.remove('hidden-by-mode');
|
||||
|
||||
} else { // --- DETAIL MODE --- (mode === 'detail')
|
||||
toggleButton.style.display = 'inline-block'; // Show toggle
|
||||
selectorCheckbox.style.display = 'none';
|
||||
row.classList.remove('row-selected'); // Clear selection highlight
|
||||
row.classList.remove('view-highlighted'); // Clear view highlight when switching to detail
|
||||
// Details row visibility is controlled by the toggle button state, don't force hide/show here
|
||||
// Ensure main row is visible if not hidden by search
|
||||
row.classList.remove('hidden-by-mode');
|
||||
if (detailsRow) {
|
||||
detailsRow.classList.remove('hidden-by-mode');
|
||||
// Preserve existing display state (controlled by toggle) unless hidden by search
|
||||
if (detailsRow.classList.contains('hidden-by-search')) {
|
||||
detailsRow.style.display = 'none';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Ensure rows hidden by search remain hidden regardless of mode
|
||||
if (row.classList.contains('hidden-by-search')) {
|
||||
row.style.display = 'none';
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
} else if (!row.classList.contains('hidden-by-mode')) {
|
||||
// Make row visible if not hidden by search or mode
|
||||
row.style.display = ''; // Or 'table-row' if needed, but '' usually works
|
||||
} else {
|
||||
// Row is hidden by mode, ensure it's hidden
|
||||
row.style.display = 'none';
|
||||
if (detailsRow) detailsRow.style.display = 'none';
|
||||
}
|
||||
|
||||
|
||||
});
|
||||
|
||||
// Update the leaderboard title based on mode and selection
|
||||
if (leaderboardTitle) {
|
||||
// Check if a custom title is provided globally
|
||||
if (typeof LEADERBOARD_CUSTOM_TITLE !== 'undefined' && LEADERBOARD_CUSTOM_TITLE) {
|
||||
leaderboardTitle.textContent = LEADERBOARD_CUSTOM_TITLE;
|
||||
} else {
|
||||
if (currentMode === 'view' && selectedRows.size > 0) {
|
||||
leaderboardTitle.textContent = filteredTitle;
|
||||
} else {
|
||||
leaderboardTitle.textContent = defaultTitle;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update the select-all checkbox state after updating the view
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks since visible/selected rows may have changed
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
|
||||
|
||||
// --- Existing Initializations ---
|
||||
// Add percentage ticks
|
||||
const percentCells = document.querySelectorAll('.bar-cell:not(.cost-bar-cell)');
|
||||
percentCells.forEach(cell => {
|
||||
// Add ticks at 0%, 10%, 20%, ..., 100%
|
||||
for (let i = 0; i <= 100; i += 10) {
|
||||
const tick = document.createElement('div');
|
||||
tick.className = 'percent-tick';
|
||||
tick.style.left = `${i}%`;
|
||||
cell.appendChild(tick);
|
||||
}
|
||||
});
|
||||
|
||||
// Function to calculate the appropriate max display cost based on visible/selected entries
|
||||
function calculateDisplayMaxCost() {
|
||||
// Get the appropriate set of rows based on the current mode and selection state
|
||||
let rowsToConsider;
|
||||
|
||||
if (currentMode === 'view' && selectedRows.size > 0) {
|
||||
// In view mode with selections, only consider selected rows
|
||||
rowsToConsider = Array.from(allMainRows).filter(row => {
|
||||
const rowIndex = row.querySelector('.row-selector')?.dataset.rowIndex;
|
||||
return rowIndex && selectedRows.has(rowIndex) && !row.classList.contains('hidden-by-search');
|
||||
});
|
||||
} else {
|
||||
// In other modes or without selections, consider all visible rows
|
||||
rowsToConsider = getVisibleMainRows();
|
||||
}
|
||||
|
||||
// Find the maximum cost among the rows to consider
|
||||
let maxCost = 0;
|
||||
rowsToConsider.forEach(row => {
|
||||
const costBar = row.querySelector('.cost-bar');
|
||||
if (costBar) {
|
||||
const cost = parseFloat(costBar.dataset.cost || '0');
|
||||
if (cost > maxCost) maxCost = cost;
|
||||
}
|
||||
});
|
||||
|
||||
// Cap at MAX_DISPLAY_COST_CAP if any entries exceed that amount, otherwise use actual max
|
||||
return maxCost > MAX_DISPLAY_COST_CAP ? MAX_DISPLAY_COST_CAP : Math.max(1, maxCost); // Ensure at least 1 to avoid division by zero
|
||||
}
|
||||
|
||||
// Process cost bars with dynamic scale
|
||||
function updateCostBars() {
|
||||
const costBars = document.querySelectorAll('.cost-bar');
|
||||
const currentMaxDisplayCost = calculateDisplayMaxCost();
|
||||
|
||||
// Remove existing special indicators first
|
||||
document.querySelectorAll('.dark-section, .tear-line').forEach(el => el.remove());
|
||||
|
||||
costBars.forEach(bar => {
|
||||
const cost = parseFloat(bar.dataset.cost);
|
||||
|
||||
if (cost > 0) {
|
||||
// Calculate percentage based on the dynamic display max
|
||||
const percent = Math.min(cost, currentMaxDisplayCost) / currentMaxDisplayCost * 100;
|
||||
// Clamp percentage between 0 and 100
|
||||
bar.style.width = Math.max(0, Math.min(100, percent)) + '%';
|
||||
|
||||
// Mark bars that exceed the limit (only if our display max is capped at 50)
|
||||
if (currentMaxDisplayCost === MAX_DISPLAY_COST_CAP && cost > MAX_DISPLAY_COST_CAP) {
|
||||
// Create a darker section at the end with diagonal stripes
|
||||
const darkSection = document.createElement('div');
|
||||
darkSection.className = 'bar-viz dark-section';
|
||||
darkSection.style.width = '15%'; // From 85% to 100%
|
||||
darkSection.style.left = '85%';
|
||||
darkSection.style.backgroundColor = 'rgba(13, 110, 253, 0.6)'; // Darker blue
|
||||
darkSection.style.borderRight = '1px solid rgba(13, 110, 253, 0.8)';
|
||||
darkSection.style.zIndex = '1';
|
||||
// Add diagonal stripes with CSS background
|
||||
darkSection.style.backgroundImage = 'repeating-linear-gradient(45deg, rgba(255,255,255,0.3), rgba(255,255,255,0.3) 5px, transparent 5px, transparent 10px)';
|
||||
bar.parentNode.appendChild(darkSection);
|
||||
|
||||
// Add a dashed "tear line" at the transition point
|
||||
const tearLine = document.createElement('div');
|
||||
tearLine.className = 'tear-line';
|
||||
tearLine.style.position = 'absolute';
|
||||
tearLine.style.left = '85%';
|
||||
// Center the tear line vertically and make it 1.5x as tall as the bar
|
||||
tearLine.style.top = '50%';
|
||||
tearLine.style.transform = 'translateY(-50%)';
|
||||
tearLine.style.height = '54px'; // 1.5x the bar height (36px)
|
||||
tearLine.style.width = '2px';
|
||||
tearLine.style.backgroundColor = 'white';
|
||||
tearLine.style.borderLeft = '2px dashed rgba(0, 0, 0, 0.3)';
|
||||
tearLine.style.zIndex = '2'; // Above the bar
|
||||
bar.parentNode.appendChild(tearLine);
|
||||
}
|
||||
} else {
|
||||
// Set width to 0 if cost is 0 or negative
|
||||
bar.style.width = '0%';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Call this initially to set up the bars
|
||||
updateCostBars();
|
||||
|
||||
// Update cost ticks dynamically based on current max display cost
|
||||
function updateCostTicks() {
|
||||
const costCells = document.querySelectorAll('.cost-bar-cell');
|
||||
if (costCells.length === 0) return;
|
||||
|
||||
const currentMaxDisplayCost = calculateDisplayMaxCost();
|
||||
|
||||
// Remove existing ticks first
|
||||
document.querySelectorAll('.cost-tick').forEach(tick => tick.remove());
|
||||
|
||||
// Generate appropriate tick values based on current max
|
||||
let tickValues = [];
|
||||
|
||||
// Always use $10 increments, regardless of the max
|
||||
const maxTickValue = Math.ceil(currentMaxDisplayCost / 10) * 10; // Round up to nearest $10
|
||||
|
||||
for (let i = 0; i <= maxTickValue; i += 10) {
|
||||
tickValues.push(i);
|
||||
}
|
||||
|
||||
// Calculate percentage positions for each tick
|
||||
const tickPercentages = tickValues.map(tickCost => {
|
||||
return (tickCost / currentMaxDisplayCost) * 100;
|
||||
});
|
||||
|
||||
// Add tick divs to each cost cell
|
||||
costCells.forEach(cell => {
|
||||
const costBar = cell.querySelector('.cost-bar');
|
||||
// Use optional chaining and provide '0' as fallback if costBar or dataset.cost is missing
|
||||
const cost = parseFloat(costBar?.dataset?.cost || '0');
|
||||
|
||||
// Only add ticks if the cost is actually greater than 0
|
||||
if (cost > 0) {
|
||||
tickPercentages.forEach((percent, index) => {
|
||||
// Ensure percentage is within valid range
|
||||
if (percent >= 0 && percent <= 100) {
|
||||
const tick = document.createElement('div');
|
||||
tick.className = 'cost-tick';
|
||||
tick.style.left = `${percent}%`;
|
||||
cell.appendChild(tick);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Call this initially to set up the ticks
|
||||
updateCostTicks();
|
||||
|
||||
|
||||
// --- New Event Listeners ---
|
||||
|
||||
// Listener for mode toggle buttons
|
||||
modeButtons.forEach(button => {
|
||||
button.addEventListener('click', function(event) {
|
||||
const newMode = this.dataset.mode;
|
||||
if (newMode !== currentMode) {
|
||||
// Update active button style
|
||||
modeButtons.forEach(btn => {
|
||||
btn.classList.remove('active');
|
||||
// Reset specific styles potentially added by .active
|
||||
btn.style.backgroundColor = '';
|
||||
btn.style.color = '';
|
||||
});
|
||||
this.classList.add('active');
|
||||
// Apply active styles directly as inline styles might interfere
|
||||
this.style.backgroundColor = '#e7f3ff'; // Use selected row highlight blue
|
||||
this.style.color = '#495057'; // Use dark text for contrast on light blue
|
||||
|
||||
// Update table view and apply filters
|
||||
updateTableView(newMode);
|
||||
applySearchFilter(); // Re-apply search filter when mode changes
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Listener for row selector checkboxes (using event delegation on table body)
|
||||
const tableBody = document.querySelector('table tbody');
|
||||
tableBody.addEventListener('change', function(event) {
|
||||
if (event.target.classList.contains('row-selector') && currentMode === 'select') {
|
||||
const checkbox = event.target;
|
||||
const rowIndex = checkbox.dataset.rowIndex;
|
||||
const mainRow = checkbox.closest('tr');
|
||||
|
||||
if (checkbox.checked) {
|
||||
selectedRows.add(rowIndex);
|
||||
mainRow.classList.add('row-selected');
|
||||
} else {
|
||||
selectedRows.delete(rowIndex);
|
||||
mainRow.classList.remove('row-selected');
|
||||
}
|
||||
// Update select-all checkbox state
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks if in view mode, as selection affects what's shown
|
||||
if (currentMode === 'view') {
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
}
|
||||
}
|
||||
}); // End of tableBody listener
|
||||
|
||||
// Listener for Select All checkbox
|
||||
selectAllCheckbox.addEventListener('change', function() {
|
||||
if (currentMode !== 'select') return;
|
||||
|
||||
const isChecked = selectAllCheckbox.checked;
|
||||
// Select/deselect only the rows that are currently visible
|
||||
const visibleRows = getVisibleMainRows();
|
||||
|
||||
visibleRows.forEach(row => {
|
||||
const checkbox = row.querySelector('.row-selector');
|
||||
const rowIndex = checkbox?.dataset.rowIndex;
|
||||
if (!checkbox || !rowIndex) return; // Skip if no checkbox/index found
|
||||
|
||||
// Only change state if it differs from target state
|
||||
if (checkbox.checked !== isChecked) {
|
||||
checkbox.checked = isChecked;
|
||||
row.classList.toggle('row-selected', isChecked);
|
||||
if (isChecked) {
|
||||
selectedRows.add(rowIndex);
|
||||
} else {
|
||||
selectedRows.delete(rowIndex);
|
||||
}
|
||||
}
|
||||
});
|
||||
// After bulk change, ensure the selectAll checkbox state is correct (not indeterminate)
|
||||
updateSelectAllCheckboxState();
|
||||
|
||||
// Update cost bars and ticks after selection changes
|
||||
updateCostBars();
|
||||
updateCostTicks();
|
||||
});
|
||||
|
||||
// Listener for search input
|
||||
searchInput.addEventListener('input', applySearchFilter);
|
||||
|
||||
// Add toggle functionality for details (Modified to respect modes)
|
||||
const toggleButtons = document.querySelectorAll('.toggle-details');
|
||||
toggleButtons.forEach(button => {
|
||||
button.addEventListener('click', function() {
|
||||
// Only allow toggling in 'detail' mode
|
||||
if (currentMode !== 'detail') return;
|
||||
|
||||
const targetId = this.getAttribute('data-target');
|
||||
const targetRow = document.getElementById(targetId);
|
||||
const mainRow = this.closest('tr'); // Get the main row associated with this button
|
||||
|
||||
if (targetRow && !mainRow.classList.contains('hidden-by-mode') && !mainRow.classList.contains('hidden-by-search')) {
|
||||
const isVisible = targetRow.style.display !== 'none';
|
||||
targetRow.style.display = isVisible ? 'none' : 'table-row';
|
||||
this.textContent = isVisible ? '▶' : '▼';
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Listener for clicking anywhere on a row
|
||||
tableBody.addEventListener('click', function(event) {
|
||||
const clickedRow = event.target.closest('tr');
|
||||
|
||||
// Ensure it's a main row and not a details row or header/footer
|
||||
if (!clickedRow || !clickedRow.id.startsWith('main-row-')) return;
|
||||
|
||||
// --- START conditional logic ---
|
||||
if (currentMode === 'select') {
|
||||
// --- SELECT MODE LOGIC (Existing) ---
|
||||
// Find the checkbox within this row
|
||||
const checkbox = clickedRow.querySelector('.row-selector');
|
||||
if (!checkbox) return; // No checkbox found in this row
|
||||
|
||||
// If the click was directly on the checkbox or its label (if any),
|
||||
// let the default behavior and the 'change' event listener handle it.
|
||||
// Otherwise, toggle the checkbox state programmatically.
|
||||
if (event.target !== checkbox && event.target.tagName !== 'LABEL' /* Add if you use labels */) {
|
||||
checkbox.checked = !checkbox.checked;
|
||||
// Manually trigger the change event to update state and UI
|
||||
checkbox.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
}
|
||||
// --- END SELECT MODE LOGIC ---
|
||||
|
||||
} else if (currentMode === 'view') {
|
||||
// --- VIEW MODE LOGIC (New) ---
|
||||
// Don't highlight if the click was on the details toggle button
|
||||
if (event.target.classList.contains('toggle-details')) {
|
||||
return;
|
||||
}
|
||||
// Toggle the highlight class on the clicked row
|
||||
clickedRow.classList.toggle('view-highlighted');
|
||||
// --- END VIEW MODE LOGIC ---
|
||||
}
|
||||
// --- END conditional logic ---
|
||||
});
|
||||
|
||||
|
||||
// --- Initial Setup ---
|
||||
updateTableView('view'); // Initialize view to 'view' mode
|
||||
applySearchFilter(); // Apply initial search filter (if any text is pre-filled or just to set initial state)
|
||||
|
||||
// Close button functionality
|
||||
const closeControlsBtn = document.getElementById('close-controls-btn');
|
||||
if (closeControlsBtn) {
|
||||
closeControlsBtn.addEventListener('click', function() {
|
||||
const controlsContainer = document.getElementById('controls-container');
|
||||
if (controlsContainer) {
|
||||
controlsContainer.style.display = 'none';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
});
|
||||
@@ -4,7 +4,7 @@ You can send long, multi-line messages in the chat in a few ways:
|
||||
- Or, start with `{tag` (where "tag" is any sequence of letters/numbers) and end with `tag}`. This is useful when you need to include closing braces `}` in your message.
|
||||
- Use Meta-ENTER to start a new line without sending the message (Esc+ENTER in some environments).
|
||||
- Use `/paste` to paste text from the clipboard into the chat.
|
||||
- Use the `/editor` command to open your editor to create the next chat message. See [editor configuration docs](/docs/config/editor.html) for more info.
|
||||
- Use the `/editor` command (or press `Ctrl-X Ctrl-E` if your terminal allows) to open your editor to create the next chat message. See [editor configuration docs](/docs/config/editor.html) for more info.
|
||||
- Use multiline-mode, which swaps the function of Meta-Enter and Enter, so that Enter inserts a newline, and Meta-Enter submits your command. To enable multiline mode:
|
||||
- Use the `/multiline-mode` command to toggle it during a session.
|
||||
- Use the `--multiline` switch.
|
||||
|
||||
@@ -3,5 +3,5 @@
|
||||
Aider is on
|
||||
<a href="https://github.com/Aider-AI/aider">GitHub</a>
|
||||
and
|
||||
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>.
|
||||
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>.
|
||||
</footer>
|
||||
|
||||
228
aider/website/_includes/recording.css
Normal file
228
aider/website/_includes/recording.css
Normal file
@@ -0,0 +1,228 @@
|
||||
/* Terminal header styling */
|
||||
.terminal-header {
|
||||
background-color: #e0e0e0;
|
||||
border-top-left-radius: 6px;
|
||||
border-top-right-radius: 6px;
|
||||
padding: 4px 10px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
border-bottom: 1px solid #c0c0c0;
|
||||
}
|
||||
|
||||
.terminal-buttons {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.terminal-button {
|
||||
width: 10px;
|
||||
height: 10px;
|
||||
border-radius: 50%;
|
||||
}
|
||||
|
||||
.terminal-close {
|
||||
background-color: #ff5f56;
|
||||
border: 1px solid #e0443e;
|
||||
}
|
||||
|
||||
.terminal-minimize {
|
||||
background-color: #ffbd2e;
|
||||
border: 1px solid #dea123;
|
||||
}
|
||||
|
||||
.terminal-expand {
|
||||
background-color: #27c93f;
|
||||
border: 1px solid #1aab29;
|
||||
}
|
||||
|
||||
.terminal-title {
|
||||
flex-grow: 1;
|
||||
text-align: center;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
font-size: 11px;
|
||||
color: #666;
|
||||
}
|
||||
|
||||
/* Toast notification styling */
|
||||
.toast-container {
|
||||
position: fixed;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
z-index: 9999;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.toast-notification {
|
||||
background-color: rgba(0, 0, 0, 0.7);
|
||||
color: white;
|
||||
padding: 12px 25px;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 10px;
|
||||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
|
||||
opacity: 0;
|
||||
transition: opacity 0.3s ease-in-out;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
font-size: 18px;
|
||||
text-align: center;
|
||||
display: inline-block;
|
||||
min-width: 200px;
|
||||
max-width: 90%;
|
||||
}
|
||||
|
||||
/* Page container styling */
|
||||
.page-container {
|
||||
max-width: 950px;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
/* macOS backdrop styling */
|
||||
.macos-backdrop {
|
||||
background: linear-gradient(135deg, #ff9966, #ff5e62, #6666ff, #0066ff);
|
||||
border-radius: 12px;
|
||||
padding: clamp(5px, 5vw, 50px) clamp(5px, 2.5vw, 50px);
|
||||
margin: 20px 0;
|
||||
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
/* Add subtle wave animation to backdrop */
|
||||
.macos-backdrop::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background: radial-gradient(circle at center, rgba(255,255,255,0.1) 0%, rgba(255,255,255,0) 70%);
|
||||
opacity: 0.7;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
/* Add decorative curved lines to the backdrop */
|
||||
.macos-backdrop::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background-image:
|
||||
radial-gradient(circle at 20% 30%, transparent 0%, transparent 60%, rgba(255,255,255,0.2) 61%, transparent 62%),
|
||||
radial-gradient(circle at 80% 70%, transparent 0%, transparent 40%, rgba(255,255,255,0.2) 41%, transparent 42%),
|
||||
radial-gradient(circle at 40% 90%, transparent 0%, transparent 70%, rgba(255,255,255,0.2) 71%, transparent 72%),
|
||||
radial-gradient(circle at 60% 10%, transparent 0%, transparent 50%, rgba(255,255,255,0.2) 51%, transparent 52%);
|
||||
background-size: 100% 100%;
|
||||
opacity: 1;
|
||||
pointer-events: none;
|
||||
z-index: 0;
|
||||
}
|
||||
|
||||
.terminal-container {
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
position: relative;
|
||||
background-color: white; /* Add background color to terminal container */
|
||||
z-index: 2; /* Ensure terminal appears above the backdrop effects */
|
||||
}
|
||||
|
||||
/* Timestamp link styling */
|
||||
.timestamp-link {
|
||||
color: #0366d6;
|
||||
text-decoration: none;
|
||||
font-weight: bold;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.timestamp-link:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
/* Active timestamp styling */
|
||||
.timestamp-active {
|
||||
background-color: #f0f8ff; /* Light blue background */
|
||||
border-radius: 3px;
|
||||
padding: 2px 4px;
|
||||
margin: -2px -4px;
|
||||
}
|
||||
|
||||
/* Highlight the list item containing the active timestamp */
|
||||
li.active-marker {
|
||||
background-color: #f6f8fa;
|
||||
border-radius: 4px;
|
||||
padding: 4px 8px;
|
||||
margin-left: -8px;
|
||||
}
|
||||
|
||||
/* Make list items clickable */
|
||||
.transcript-item {
|
||||
cursor: pointer;
|
||||
transition: background-color 0.2s ease;
|
||||
padding: 4px 8px;
|
||||
margin-left: -8px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.transcript-item:hover {
|
||||
background-color: #f0f0f0;
|
||||
}
|
||||
|
||||
/* Keyboard shortcuts styling */
|
||||
.keyboard-shortcuts {
|
||||
text-align: center;
|
||||
font-size: 14px;
|
||||
color: #666;
|
||||
margin-top: 10px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
/* Hide keyboard shortcuts on devices likely without physical keyboards */
|
||||
.no-physical-keyboard .keyboard-shortcuts {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.keyboard-shortcuts kbd {
|
||||
background-color: #f7f7f7;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 3px;
|
||||
box-shadow: 0 1px 0 rgba(0,0,0,0.2);
|
||||
color: #333;
|
||||
display: inline-block;
|
||||
font-family: monospace;
|
||||
line-height: 1;
|
||||
margin: 0 2px;
|
||||
padding: 3px 5px;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.asciinema-player-theme-aider {
|
||||
/* Foreground (default text) color */
|
||||
--term-color-foreground: #444444; /* colour238 */
|
||||
|
||||
/* Background color */
|
||||
--term-color-background: #dadada; /* colour253 */
|
||||
|
||||
/* Palette of 16 standard ANSI colors */
|
||||
--term-color-0: #21222c;
|
||||
--term-color-1: #ff5555;
|
||||
--term-color-2: #50fa7b;
|
||||
--term-color-3: #f1fa8c;
|
||||
--term-color-4: #bd93f9;
|
||||
--term-color-5: #ff79c6;
|
||||
--term-color-6: #8be9fd;
|
||||
--term-color-7: #f8f8f2;
|
||||
--term-color-8: #6272a4;
|
||||
--term-color-9: #ff6e6e;
|
||||
--term-color-10: #69ff94;
|
||||
--term-color-11: #ffffa5;
|
||||
--term-color-12: #d6acff;
|
||||
--term-color-13: #ff92df;
|
||||
--term-color-14: #a4ffff;
|
||||
--term-color-15: #ffffff;
|
||||
}
|
||||
428
aider/website/_includes/recording.js
Normal file
428
aider/website/_includes/recording.js
Normal file
@@ -0,0 +1,428 @@
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
let player; // Store player reference to make it accessible to click handlers
|
||||
let globalAudio; // Global audio element to be reused
|
||||
|
||||
// Detect if device likely has no physical keyboard
|
||||
function detectNoKeyboard() {
|
||||
// Check if it's a touch device (most mobile devices)
|
||||
const isTouchDevice = ('ontouchstart' in window) ||
|
||||
(navigator.maxTouchPoints > 0) ||
|
||||
(navigator.msMaxTouchPoints > 0);
|
||||
|
||||
// Check common mobile user agents as additional signal
|
||||
const isMobileUA = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent);
|
||||
|
||||
// If it's a touch device and has a mobile user agent, likely has no physical keyboard
|
||||
if (isTouchDevice && isMobileUA) {
|
||||
document.body.classList.add('no-physical-keyboard');
|
||||
}
|
||||
}
|
||||
|
||||
// Run detection
|
||||
detectNoKeyboard();
|
||||
|
||||
// Parse the transcript section to create markers and convert timestamps to links
|
||||
function parseTranscript() {
|
||||
const markers = [];
|
||||
// Find the Commentary heading
|
||||
const transcriptHeading = Array.from(document.querySelectorAll('h2')).find(el => el.textContent.trim() === 'Commentary');
|
||||
|
||||
if (transcriptHeading) {
|
||||
// Get all list items after the transcript heading
|
||||
let currentElement = transcriptHeading.nextElementSibling;
|
||||
|
||||
while (currentElement && currentElement.tagName === 'UL') {
|
||||
const listItems = currentElement.querySelectorAll('li');
|
||||
|
||||
listItems.forEach(item => {
|
||||
const text = item.textContent.trim();
|
||||
const match = text.match(/(\d+):(\d+)\s+(.*)/);
|
||||
|
||||
if (match) {
|
||||
const minutes = parseInt(match[1], 10);
|
||||
const seconds = parseInt(match[2], 10);
|
||||
const timeInSeconds = minutes * 60 + seconds;
|
||||
const formattedTime = `${minutes}:${seconds.toString().padStart(2, '0')}`;
|
||||
const message = match[3].trim();
|
||||
|
||||
// Create link for the timestamp
|
||||
const timeLink = document.createElement('a');
|
||||
timeLink.href = '#';
|
||||
timeLink.textContent = formattedTime;
|
||||
timeLink.className = 'timestamp-link';
|
||||
timeLink.dataset.time = timeInSeconds;
|
||||
timeLink.dataset.message = message;
|
||||
|
||||
// Add click event to seek the player
|
||||
timeLink.addEventListener('click', function(e) {
|
||||
e.preventDefault();
|
||||
if (player && typeof player.seek === 'function') {
|
||||
player.seek(timeInSeconds);
|
||||
player.play();
|
||||
|
||||
// Also trigger toast and speech
|
||||
showToast(message);
|
||||
speakText(message, timeInSeconds);
|
||||
|
||||
// Highlight this timestamp
|
||||
highlightTimestamp(timeInSeconds);
|
||||
}
|
||||
});
|
||||
|
||||
// Replace text with the link + message
|
||||
item.textContent = '';
|
||||
item.appendChild(timeLink);
|
||||
item.appendChild(document.createTextNode(' ' + message));
|
||||
|
||||
// Add class and click handler to the entire list item
|
||||
item.classList.add('transcript-item');
|
||||
item.dataset.time = timeInSeconds;
|
||||
item.dataset.message = message;
|
||||
|
||||
item.addEventListener('click', function(e) {
|
||||
// Prevent click event if the user clicked directly on the timestamp link
|
||||
// This prevents double-firing of the event
|
||||
if (e.target !== timeLink) {
|
||||
e.preventDefault();
|
||||
if (player && typeof player.seek === 'function') {
|
||||
player.seek(timeInSeconds);
|
||||
player.play();
|
||||
|
||||
// Also trigger toast and speech
|
||||
showToast(message);
|
||||
speakText(message, timeInSeconds);
|
||||
|
||||
// Highlight this timestamp
|
||||
highlightTimestamp(timeInSeconds);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
markers.push([timeInSeconds, message]);
|
||||
}
|
||||
});
|
||||
|
||||
currentElement = currentElement.nextElementSibling;
|
||||
}
|
||||
}
|
||||
|
||||
return markers;
|
||||
}
|
||||
|
||||
// Parse transcript and create markers
|
||||
const markers = parseTranscript();
|
||||
|
||||
// Create player with a single call
|
||||
player = AsciinemaPlayer.create(
|
||||
recording_url,
|
||||
document.getElementById('demo'),
|
||||
{
|
||||
speed: 1.25,
|
||||
idleTimeLimit: 1,
|
||||
theme: "aider",
|
||||
poster: "npt:0:01",
|
||||
markers: markers,
|
||||
controls: true
|
||||
}
|
||||
);
|
||||
|
||||
// Focus on the player element so keyboard shortcuts work immediately
|
||||
setTimeout(() => {
|
||||
// Use setTimeout to ensure the player is fully initialized
|
||||
if (player && typeof player.focus === 'function') {
|
||||
player.focus();
|
||||
} else {
|
||||
// If player doesn't have a focus method, try to find and focus the terminal element
|
||||
const playerElement = document.querySelector('.asciinema-terminal');
|
||||
if (playerElement) {
|
||||
playerElement.focus();
|
||||
} else {
|
||||
// Last resort - try to find element with tabindex
|
||||
const tabbableElement = document.querySelector('[tabindex]');
|
||||
if (tabbableElement) {
|
||||
tabbableElement.focus();
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 100);
|
||||
|
||||
// Track active toast elements
|
||||
let activeToast = null;
|
||||
|
||||
// Function to display toast notification
|
||||
// Display a caption toast, replacing any toast already on screen.
// Returns a {element, container} handle usable with hideToast().
function showToast(text) {
  // Default container lives in the normal page layout.
  let container = document.getElementById('toast-container');

  // In fullscreen, toasts must be parented to the fullscreen element,
  // otherwise they would render behind it.
  const fsElement = document.fullscreenElement ||
                    document.webkitFullscreenElement ||
                    document.mozFullScreenElement ||
                    document.msFullscreenElement;

  if (fsElement) {
    // Reuse (or lazily create) a toast container inside the fullscreen element.
    let fsContainer = fsElement.querySelector('.fs-toast-container');
    if (!fsContainer) {
      fsContainer = document.createElement('div');
      fsContainer.className = 'toast-container fs-toast-container';
      fsContainer.id = 'fs-toast-container';
      fsElement.appendChild(fsContainer);
    }
    container = fsContainer;
  }

  // Only one toast at a time — dismiss the previous one.
  if (activeToast) {
    hideToast(activeToast);
  }

  const toast = document.createElement('div');
  toast.className = 'toast-notification';
  toast.textContent = text;
  container.appendChild(toast);

  // Remember the live toast so callbacks elsewhere can dismiss it.
  activeToast = {
    element: toast,
    container: container
  };

  // Defer the opacity change so the CSS transition fires after insertion.
  setTimeout(() => {
    toast.style.opacity = '1';
  }, 10);

  return activeToast;
}
|
||||
|
||||
// Function to hide a toast
|
||||
// Fade out and remove a toast created by showToast().
function hideToast(toastInfo) {
  if (!toastInfo || !toastInfo.element) return;

  // Start the fade-out transition.
  toastInfo.element.style.opacity = '0';

  // Remove from the DOM once the 300ms fade-out completes.
  setTimeout(() => {
    const { container, element } = toastInfo;
    if (container && container.contains(element)) {
      container.removeChild(element);
    }

    // Clear the module-level reference if this was the active toast.
    if (activeToast === toastInfo) {
      activeToast = null;
    }
  }, 300);
}
|
||||
|
||||
// Track if TTS is currently in progress to prevent duplicates
|
||||
let ttsInProgress = false;
|
||||
let currentToast = null;
|
||||
|
||||
// Improved browser TTS function
|
||||
// Fall back to the browser's built-in speech synthesis.
// Returns true if speech was started, false if unsupported or already speaking.
function useBrowserTTS(text) {
  // Guard against overlapping utterances.
  if (ttsInProgress) {
    console.log('Speech synthesis already in progress, skipping');
    return false;
  }

  if (!('speechSynthesis' in window)) {
    console.warn('SpeechSynthesis not supported');
    return false;
  }

  console.log('Using browser TTS fallback');
  ttsInProgress = true;

  // Stop anything that may still be speaking.
  window.speechSynthesis.cancel();

  const utterance = new SpeechSynthesisUtterance(text);
  utterance.rate = 1.0;
  utterance.pitch = 1.0;
  utterance.volume = 1.0;

  // iOS handles long utterances poorly; trim to ~100 chars there.
  if (/iPad|iPhone|iPod/.test(navigator.userAgent) && !window.MSStream) {
    utterance.text = text.length > 100 ? text.substring(0, 100) + '...' : text;
  }

  // Shared cleanup: dismiss the caption toast if one is showing.
  const dismissToast = () => {
    if (currentToast) {
      hideToast(currentToast);
      currentToast = null;
    }
  };

  utterance.onstart = () => console.log('Speech started');
  utterance.onend = () => {
    console.log('Speech ended');
    ttsInProgress = false; // allow the next utterance
    dismissToast();
  };
  utterance.onerror = (e) => {
    console.warn('Speech error:', e);
    ttsInProgress = false; // reset so errors don't wedge the flag
    dismissToast();
  };

  window.speechSynthesis.speak(utterance);
  return true;
}
|
||||
|
||||
// Function to play pre-generated TTS audio files
|
||||
// Play the pre-generated narration clip for a marker, falling back to
// browser speech synthesis if the audio file cannot be played.
function speakText(text, timeInSeconds) {
  // Show the caption toast and keep the handle so audio callbacks can dismiss it.
  currentToast = showToast(text);

  // Audio files are named MM-SS.mp3 after the marker time.
  const mins = Math.floor(timeInSeconds / 60);
  const secs = timeInSeconds % 60;
  const formattedTime = `${mins.toString().padStart(2, '0')}-${secs.toString().padStart(2, '0')}`;

  // Prefer a page-provided recording_id; otherwise derive it from the URL path.
  const recordingId = typeof recording_id !== 'undefined' ? recording_id :
                      window.location.pathname.split('/').pop().replace('.html', '');

  const audioPath = `/assets/audio/${recordingId}/${formattedTime}.mp3`;
  console.log(`Attempting to play audio: ${audioPath}`);

  const isIOS = /iPad|iPhone|iPod/.test(navigator.userAgent) && !window.MSStream;
  console.log(`Device is iOS: ${isIOS}`);

  // Ensure we fall back to speech synthesis at most once per call.
  let fallenBackToTTS = false;

  try {
    // Lazily create a single shared Audio element and reuse it across markers.
    if (!globalAudio) {
      globalAudio = new Audio();
      console.log("Created new global Audio element");
    }

    globalAudio.onended = () => {
      console.log('Audio playback ended');
      // Dismiss the caption once the clip finishes.
      if (currentToast) {
        hideToast(currentToast);
        currentToast = null;
      }
    };

    globalAudio.onerror = (e) => {
      console.warn(`Audio error: ${e.type}`, e);
      if (!fallenBackToTTS) {
        fallenBackToTTS = true;
        useBrowserTTS(text);
      } else if (currentToast) {
        // TTS already failed too — just drop the toast.
        hideToast(currentToast);
        currentToast = null;
      }
    };

    // Preloading helps subsequent plays on iOS.
    if (isIOS) {
      globalAudio.preload = "auto";
    }

    globalAudio.src = audioPath;

    // play() returns a promise in modern browsers; handle rejection there.
    const playPromise = globalAudio.play();
    if (playPromise !== undefined) {
      playPromise.catch(error => {
        console.warn(`Play error: ${error.message}`);

        // On iOS, playback may be blocked without a user gesture.
        if (isIOS) {
          console.log("iOS playback failed, trying SpeechSynthesis");
        }

        if (!fallenBackToTTS) {
          fallenBackToTTS = true;
          useBrowserTTS(text);
        }
      });
    }
  } catch (e) {
    console.error(`Exception in audio playback: ${e.message}`);
    useBrowserTTS(text);
  }
}
|
||||
|
||||
// Function to highlight the active timestamp in the transcript
|
||||
// Highlight the transcript entry whose timestamp matches the given time.
function highlightTimestamp(timeInSeconds) {
  // Clear any previous highlighting.
  document.querySelectorAll('.timestamp-active').forEach(el => {
    el.classList.remove('timestamp-active');
  });
  document.querySelectorAll('.active-marker').forEach(el => {
    el.classList.remove('active-marker');
  });

  // Locate the first timestamp link whose data-time matches.
  const links = Array.from(document.querySelectorAll('.timestamp-link'));
  const activeLink = links.find(link => parseInt(link.dataset.time) === timeInSeconds) || null;

  if (!activeLink) return;

  activeLink.classList.add('timestamp-active');

  // Also highlight the containing list item.
  // Intentionally no scrollIntoView, to avoid shifting focus.
  const listItem = activeLink.closest('li');
  if (listItem) {
    listItem.classList.add('active-marker');
  }
}
|
||||
|
||||
// Wire up marker events (with safety checks): narrate and highlight
// each marker as playback reaches it.
if (player && typeof player.addEventListener === 'function') {
  player.addEventListener('marker', function(event) {
    try {
      const { index, time, label } = event;
      console.log(`marker! ${index} - ${time} - ${label}`);

      // Speak the marker label (the toast is shown inside speakText).
      speakText(label, time);

      // Keep the transcript highlight in sync with playback.
      highlightTimestamp(time);
    } catch (error) {
      console.error('Error in marker event handler:', error);
    }
  });
}
|
||||
});
|
||||
34
aider/website/_includes/recording.md
Normal file
34
aider/website/_includes/recording.md
Normal file
@@ -0,0 +1,34 @@
|
||||
<link rel="stylesheet" type="text/css" href="/assets/asciinema/asciinema-player.css" />
|
||||
|
||||
<style>
|
||||
{% include recording.css %}
|
||||
</style>
|
||||
|
||||
<script src="/assets/asciinema/asciinema-player.min.js"></script>
|
||||
<script>
|
||||
{% include recording.js %}
|
||||
</script>
|
||||
|
||||
<div class="page-container">
|
||||
<div class="toast-container" id="toast-container"></div>
|
||||
|
||||
<div class="macos-backdrop">
|
||||
<div class="terminal-container">
|
||||
<div class="terminal-header">
|
||||
<div class="terminal-buttons">
|
||||
<div class="terminal-button terminal-close"></div>
|
||||
<div class="terminal-button terminal-minimize"></div>
|
||||
<div class="terminal-button terminal-expand"></div>
|
||||
</div>
|
||||
<div class="terminal-title">aider</div>
|
||||
</div>
|
||||
<div id="demo"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="keyboard-shortcuts">
|
||||
<kbd>Space</kbd> Play/pause —
|
||||
<kbd>f</kbd> Fullscreen —
|
||||
<kbd>←</kbd><kbd>→</kbd> ±5s
|
||||
</div>
|
||||
@@ -15,12 +15,12 @@ nav_exclude: true
|
||||
I recently wanted to draw a graph showing how LLM code editing skill has been
|
||||
changing over time as new models have been released by OpenAI, Anthropic and others.
|
||||
I have all the
|
||||
[data in a yaml file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
|
||||
[data in a YAML file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
|
||||
[aider's LLM leaderboards](https://aider.chat/docs/leaderboards/).
|
||||
|
||||
Below is the aider chat transcript, which shows:
|
||||
|
||||
- I launch aider with the yaml file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
|
||||
- I launch aider with the YAML file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
|
||||
- Then I ask GPT to draw the scatterplot I want.
|
||||
- I run the resulting script and share the error output with GPT so it can fix a small bug.
|
||||
- I ask it to color the points for GPT-4 and GPT-3.5 family models differently, to better see trends within those model families.
|
||||
@@ -28,7 +28,7 @@ Below is the aider chat transcript, which shows:
|
||||
- I work through a series of other small style changes, like changing fonts and the graph border.
|
||||
|
||||
In the end I have the graph, but I also have the python code in my repo.
|
||||
So I can update this graph easily whenever I add new entries to the yaml data file.
|
||||
So I can update this graph easily whenever I add new entries to the YAML data file.
|
||||
|
||||
|
||||
## Aider chat transcript
|
||||
|
||||
114
aider/website/_posts/2025-05-07-gemini-cost.md
Normal file
114
aider/website/_posts/2025-05-07-gemini-cost.md
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
title: Gemini 2.5 Pro Preview 03-25 benchmark cost
|
||||
excerpt: The $6.32 benchmark cost reported for Gemini 2.5 Pro Preview 03-25 was incorrect.
|
||||
draft: false
|
||||
nav_exclude: true
|
||||
---
|
||||
{% if page.date %}
|
||||
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
|
||||
{% endif %}
|
||||
|
||||
# Gemini 2.5 Pro Preview 03-25 benchmark cost
|
||||
|
||||
## Summary
|
||||
The $6.32 cost reported to run the aider polyglot benchmark on
|
||||
Gemini 2.5 Pro Preview 03-25 was incorrect.
|
||||
The true cost was higher, possibly significantly so.
|
||||
The incorrect cost has been removed from the leaderboard.
|
||||
|
||||
An investigation determined the primary cause was that the litellm
|
||||
package (used by aider for LLM API connections) was not properly including reasoning tokens in
|
||||
the token counts it reported.
|
||||
While an incorrect price-per-token entry for the model also existed in litellm's cost
|
||||
database at that time, this was found not to be a contributing factor.
|
||||
Aider's own internal, correct pricing data was utilized during the benchmark.
|
||||
|
||||
## Resolution
|
||||
|
||||
Litellm began correctly including reasoning tokens in the reported counts
|
||||
on April 21, 2025 in
|
||||
commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b).
|
||||
This change was released in litellm v1.67.1.
|
||||
Aider picked up this change April 28, 2025 when it upgraded its litellm dependency
|
||||
from v1.65.7 to v1.67.4.post1
|
||||
in commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37).
|
||||
That dependency change shipped on May 5, 2025 in aider v0.82.3.
|
||||
|
||||
Unfortunately the 03-25 version of Gemini 2.5 Pro Preview is no longer available,
|
||||
so it is not possible to re-run the benchmark to obtain an accurate cost.
|
||||
As a possibly relevant comparison, the newer 05-06 version of Gemini 2.5 Pro Preview
|
||||
completed the benchmark at a cost of about $37.
|
||||
|
||||
## Investigation detail
|
||||
|
||||
The version of litellm available at that time of the benchmark appears to have been
|
||||
excluding reasoning tokens from the token counts it reported.
|
||||
So even though aider had correct per-token pricing, it did not have the correct token counts
|
||||
used during the benchmark.
|
||||
This resulted in an underestimate of the benchmark costs.
|
||||
|
||||
The incorrect litellm database entry does not appear to have affected the aider benchmark costs.
|
||||
Aider maintains and uses its own database of costs for some models, and it contained
|
||||
the correct pricing at the time of the benchmark.
|
||||
Aider appears to have
|
||||
loaded the correct cost data from its database and made use of it during the benchmark.
|
||||
|
||||
Every aider benchmark report contains the git commit hash of the aider repository state used to
|
||||
run the benchmark.
|
||||
The
|
||||
[benchmark run in question](https://github.com/Aider-AI/aider/blob/edbfec0ce4e1fe86735c915cb425b0d8636edc32/aider/website/_data/polyglot_leaderboard.yml#L814)
|
||||
was built from
|
||||
commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
|
||||
|
||||
Additional runs of the benchmark from that build verified that the error in litellm's
|
||||
model cost database appears not to have been a factor:
|
||||
|
||||
- Aider's internal model database correctly overrides the litellm database, which contained an incorrect token cost at the time.
|
||||
- The correct pricing is loaded from aider's internal model database and produces similar (incorrect) costs as the original run.
|
||||
- Updating aider's internal model database with an absurdly high token cost resulted in an appropriately high benchmark cost report, demonstrating that the internal database costs were in effect.
|
||||
|
||||
This specific build of aider was then updated with various versions of litellm using `git bisect`
|
||||
to identify the first litellm commit where reasoning token counts were correctly reported.
|
||||
|
||||
|
||||
|
||||
## Timeline
|
||||
|
||||
Below is the full timeline of git commits related to this issue in the aider and litellm repositories.
|
||||
Each entry has a UTC timestamp, followed by the original literal timestamp obtained from the
|
||||
relevant source.
|
||||
|
||||
- 2025-04-04 19:54:45 UTC (Sat Apr 5 08:54:45 2025 +1300)
|
||||
- Correct value `"output_cost_per_token": 0.000010` for `gemini/gemini-2.5-pro-preview-03-25` added to `aider/resources/model-metadata.json`
|
||||
- Commit [eda796d](https://github.com/Aider-AI/aider/commit/eda796d) in aider.
|
||||
|
||||
- 2025-04-05 16:20:01 UTC (Sun Apr 6 00:20:01 2025 +0800)
|
||||
- First litellm commit of `gemini/gemini-2.5-pro-preview-03-25` metadata, with incorrect price `"output_cost_per_token": 0.0000010`
|
||||
- Commit [cd0a1e6](https://github.com/BerriAI/litellm/commit/cd0a1e6) in litellm.
|
||||
|
||||
- 2025-04-10 01:48:43 UTC (Wed Apr 9 18:48:43 2025 -0700)
|
||||
- litellm commit updates `gemini/gemini-2.5-pro-preview-03-25` metadata, but not price
|
||||
- Commit [ac4f32f](https://github.com/BerriAI/litellm/commit/ac4f32f) in litellm.
|
||||
|
||||
- 2025-04-12 04:55:50 UTC (2025-04-12-04-55-50 UTC)
|
||||
- Benchmark performed.
|
||||
- Aider repo hash [0282574 recorded in benchmark results](https://github.com/Aider-AI/aider/blob/7fbeafa1cfd4ad83f7499417837cdfa6b16fe7a1/aider/website/_data/polyglot_leaderboard.yml#L814), without a "dirty" annotation, indicating that the benchmark was run on a clean checkout of the aider repo at commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
|
||||
- Correct value `"output_cost_per_token": 0.000010` is in `aider/resources/model-metadata.json` at this commit [0282574](https://github.com/Aider-AI/aider/blob/0282574/aider/resources/model-metadata.json#L357).
|
||||
|
||||
- 2025-04-12 15:06:39 UTC (Apr 12 08:06:39 2025 -0700)
|
||||
- Benchmark results added to aider repo.
|
||||
- Commit [7fbeafa](https://github.com/Aider-AI/aider/commit/7fbeafa) in aider.
|
||||
|
||||
- 2025-04-12 15:20:04 UTC (Sat Apr 12 19:20:04 2025 +0400)
|
||||
- litellm commit fixes `gemini/gemini-2.5-pro-preview-03-25` price metadata to `"output_cost_per_token": 0.00001`
|
||||
- Commit [93037ea](https://github.com/BerriAI/litellm/commit/93037ea) in litellm.
|
||||
|
||||
- 2025-04-22 05:48:00 UTC (Mon Apr 21 22:48:00 2025 -0700)
|
||||
- Litellm started including reasoning tokens in token count reporting.
|
||||
- Commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b) in litellm.
|
||||
- This fix was released in litellm v1.67.1.
|
||||
|
||||
- 2025-04-28 14:53:20 UTC (Mon Apr 28 07:53:20 2025 -0700)
|
||||
- Aider upgraded its litellm dependency from v1.65.7 to v1.67.4.post1, which included the reasoning token count fix.
|
||||
- Commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37) in aider.
|
||||
- This dependency change shipped on May 5, 2025 in aider v0.82.3.
|
||||
365
aider/website/_posts/2025-05-08-qwen3.md
Normal file
365
aider/website/_posts/2025-05-08-qwen3.md
Normal file
@@ -0,0 +1,365 @@
|
||||
---
|
||||
layout: post
|
||||
title: Qwen3 benchmark results
|
||||
excerpt: "Benchmark results for Qwen3 models using the Aider polyglot coding benchmark."
|
||||
highlight_image: /assets/2025-05-08-qwen3.jpg
|
||||
date: 2025-05-08
|
||||
---
|
||||
|
||||
# Qwen3 results on the aider polyglot benchmark
|
||||
|
||||
As [previously discussed when Qwen2.5 was released](/2024/11/21/quantization.html),
|
||||
details matter when working with open source models for AI coding.
|
||||
Proprietary models are served by their creators or trusted providers with stable inference settings.
|
||||
Open source models are wonderful because anyone can serve them,
|
||||
but API providers can use very different inference settings, quantizations, etc.
|
||||
|
||||
Below is a collection of aider polyglot benchmark results for the new Qwen3 models.
|
||||
Results are presented using both "diff" and "whole"
|
||||
[edit formats](https://aider.chat/docs/more/edit-formats.html),
|
||||
with various model settings, against various API providers.
|
||||
|
||||
See details on the
|
||||
[model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
|
||||
used after the results table.
|
||||
|
||||
{: .note }
|
||||
This article is being updated as new results become available.
|
||||
Also, some results were submitted by aider users and have not been verified.
|
||||
|
||||
<h2 id="leaderboard-title">Qwen3 results on the aider polyglot benchmark</h2>
|
||||
|
||||
<div id="controls-container" style="display: flex; align-items: center; width: 100%; max-width: 800px; margin: 10px auto; gap: 10px; box-sizing: border-box; padding: 0 5px; position: relative;">
|
||||
<input type="text" id="editSearchInput" placeholder="Search..." style="flex-grow: 1; padding: 8px; border: 1px solid #ddd; border-radius: 4px;">
|
||||
<div id="view-mode-toggle" style="display: inline-flex; border: 1px solid #ccc; border-radius: 4px;">
|
||||
<button id="mode-view-btn" class="mode-button active" data-mode="view" style="padding: 8px 8px; border: none; border-radius: 3px 0 0 3px; cursor: pointer; font-size: 14px; line-height: 1.5; min-width: 50px;">View</button>
|
||||
<button id="mode-select-btn" class="mode-button" data-mode="select" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Select</button>
|
||||
<button id="mode-detail-btn" class="mode-button" data-mode="detail" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0 3px 3px 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Detail</button>
|
||||
</div>
|
||||
<button id="close-controls-btn" style="width: 18px; height: 18px; padding: 0; border: 1px solid #ddd; border-radius: 50%; background-color: transparent; cursor: pointer; display: flex; align-items: center; justify-content: center; font-size: 12px; margin-left: 4px; color: #999;">×</button>
|
||||
</div>
|
||||
|
||||
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
|
||||
<thead style="background-color: #f2f2f2;">
|
||||
<tr>
|
||||
<th style="padding: 8px; width: 40px; text-align: center; vertical-align: middle;">
|
||||
<input type="checkbox" id="select-all-checkbox" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</th> <!-- Header checkbox added here -->
|
||||
<th style="padding: 8px; text-align: left;">Model</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Percent correct</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Cost</th>
|
||||
<th style="padding: 8px; text-align: left;" class="col-command">Command</th>
|
||||
<th style="padding: 8px; text-align: center; width: 10%" class="col-conform">Correct edit format</th>
|
||||
<th style="padding: 8px; text-align: left; width: 10%" class="col-edit-format">Edit Format</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% assign max_cost = 0 %}
|
||||
{% for row in site.data.qwen3_leaderboard %}
|
||||
{% if row.total_cost > max_cost %}
|
||||
{% assign max_cost = row.total_cost %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if max_cost == 0 %}{% assign max_cost = 1 %}{% endif %}
|
||||
{% assign edit_sorted = site.data.qwen3_leaderboard | sort: 'pass_rate_2' | reverse %}
|
||||
{% for row in edit_sorted %} {% comment %} Add loop index for unique IDs {% endcomment %}
|
||||
{% assign row_index = forloop.index0 %}
|
||||
<tr id="main-row-{{ row_index }}">
|
||||
<td style="padding: 8px; text-align: center; vertical-align: middle;">
|
||||
<button class="toggle-details" data-target="details-{{ row_index }}" style="background: none; border: none; cursor: pointer; font-size: 16px; padding: 0; vertical-align: middle;">▶</button>
|
||||
<input type="checkbox" class="row-selector" data-row-index="{{ row_index }}" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</td>
|
||||
<td style="padding: 8px;"><span>{{ row.model }}</span></td>
|
||||
<td class="bar-cell">
|
||||
<div class="bar-viz" style="width: {{ row.pass_rate_2 }}%; background-color: rgba(40, 167, 69, 0.3); border-right: 1px solid rgba(40, 167, 69, 0.5);"></div>
|
||||
<span>{{ row.pass_rate_2 }}%</span>
|
||||
</td>
|
||||
<td class="bar-cell cost-bar-cell">
|
||||
{% if row.total_cost > 0 %}
|
||||
<div class="bar-viz cost-bar" data-cost="{{ row.total_cost }}" data-max-cost="{{ max_cost }}" style="width: 0%; background-color: rgba(13, 110, 253, 0.3); border-right: 1px solid rgba(13, 110, 253, 0.5);"></div>
|
||||
{% endif %}
|
||||
{% assign rounded_cost = row.total_cost | times: 1.0 | round: 2 %}
|
||||
<span>{% if row.total_cost == 0 or rounded_cost == 0.00 %}{% else %}${{ rounded_cost }}{% endif %}</span>
|
||||
</td>
|
||||
<td style="padding: 8px;" class="col-command"><span><code>{{ row.command }}</code></span></td>
|
||||
<td style="padding: 8px; text-align: center;" class="col-conform"><span>{{ row.percent_cases_well_formed }}%</span></td>
|
||||
<td style="padding: 8px;" class="col-edit-format"><span>{{ row.edit_format }}</span></td>
|
||||
</tr>
|
||||
<tr class="details-row" id="details-{{ row_index }}" style="display: none; background-color: #f9f9f9;">
|
||||
<td colspan="7" style="padding: 15px; border-bottom: 1px solid #ddd;">
|
||||
<ul style="margin: 0; padding-left: 20px; list-style: none; border-bottom: 1px solid #ddd;">
|
||||
{% for pair in row %}
|
||||
{% if pair[1] != "" and pair[1] != nil %}
|
||||
<li><strong>
|
||||
{% if pair[0] == 'percent_cases_well_formed' %}
|
||||
Percent cases well formed
|
||||
{% else %}
|
||||
{{ pair[0] | replace: '_', ' ' | capitalize }}
|
||||
{% endif %}
|
||||
:</strong>
|
||||
{% if pair[0] == 'command' %}<code>{{ pair[1] }}</code>{% else %}{{ pair[1] }}{% endif %}
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<style>
|
||||
#leaderboard-title {
|
||||
margin-bottom: 20px; /* Add space below the title */
|
||||
}
|
||||
tr.selected {
|
||||
color: #0056b3;
|
||||
}
|
||||
table {
|
||||
table-layout: fixed;
|
||||
}
|
||||
thead {
|
||||
border-top: 1px solid #ddd; /* Add top border to header */
|
||||
}
|
||||
td, th {
|
||||
border: none; /* Remove internal cell borders */
|
||||
word-wrap: break-word;
|
||||
overflow-wrap: break-word;
|
||||
vertical-align: middle; /* Ensure consistent vertical alignment */
|
||||
}
|
||||
tbody tr {
|
||||
height: 50px; /* Set a minimum height for all data rows */
|
||||
}
|
||||
td.col-command { /* Command column */
|
||||
font-size: 12px; /* Keep font size adjustment for command column if desired, or remove */
|
||||
}
|
||||
|
||||
/* Hide new columns first on smaller screens */
|
||||
@media screen and (max-width: 991px) {
|
||||
th.col-conform, td.col-conform,
|
||||
th.col-edit-format, td.col-edit-format {
|
||||
display: none;
|
||||
}
|
||||
/* Increase width of Percent correct and Cost columns when others are hidden */
|
||||
th:nth-child(3), td:nth-child(3), /* Percent correct */
|
||||
th:nth-child(4), td:nth-child(4) { /* Cost */
|
||||
width: 33% !important; /* Override inline style */
|
||||
}
|
||||
}
|
||||
|
||||
/* Hide command column on even smaller screens */
|
||||
@media screen and (max-width: 767px) {
|
||||
th.col-command, td.col-command { /* Command column */
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Control Styles --- */
|
||||
#controls-container {
|
||||
margin-bottom: 20px; /* Add some space below controls */
|
||||
}
|
||||
|
||||
#editSearchInput, #view-mode-select {
|
||||
padding: 8px 12px; /* Consistent padding */
|
||||
border: 1px solid #ccc; /* Slightly softer border */
|
||||
border-radius: 4px;
|
||||
font-size: 14px; /* Match table font size */
|
||||
height: 38px; /* Match height */
|
||||
box-sizing: border-box; /* Include padding/border in height */
|
||||
}
|
||||
|
||||
|
||||
.bar-cell {
|
||||
position: relative; /* Positioning context for the bar */
|
||||
padding: 8px;
|
||||
/* text-align: center; Removed */
|
||||
overflow: hidden; /* Prevent bar from overflowing cell boundaries if needed */
|
||||
}
|
||||
.cost-bar-cell {
|
||||
background-image: none; /* Remove default gradient for cost cells */
|
||||
}
|
||||
.percent-tick, .cost-tick {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
transform: translateY(10px);
|
||||
height: 8px; /* Short tick */
|
||||
width: 1px;
|
||||
background-color: rgba(170, 170, 170, 0.5);
|
||||
z-index: 2; /* Above the bar but below the text */
|
||||
}
|
||||
.bar-viz {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 50%; /* Position at the middle of the cell */
|
||||
transform: translateY(-50%); /* Center the bar vertically */
|
||||
z-index: 1; /* Above background, below ticks and text */
|
||||
height: 36px;
|
||||
border-radius: 0 2px 2px 0; /* Slightly rounded end corners */
|
||||
/* Width and colors are set inline via style attribute */
|
||||
}
|
||||
/* Add a tooltip class for showing cost information on hover */
|
||||
.cost-bar-cell:hover .bar-viz[style*="background-image"] {
|
||||
animation: stripe-animation 2s linear infinite;
|
||||
}
|
||||
@keyframes stripe-animation {
|
||||
0% { background-position: 0 0; }
|
||||
100% { background-position: 20px 0; }
|
||||
}
|
||||
.bar-cell span {
|
||||
position: absolute; /* Position relative to the cell */
|
||||
left: 5px; /* Position slightly inside the left edge */
|
||||
top: 50%; /* Center vertically */
|
||||
transform: translateY(-50%); /* Adjust vertical centering */
|
||||
z-index: 3; /* Ensure text is above everything else */
|
||||
background-color: rgba(255, 255, 255, 0.7); /* Semi-transparent white background */
|
||||
padding: 0 4px; /* Add padding around the text */
|
||||
border-radius: 3px; /* Rounded corners for the text background */
|
||||
font-size: 14px; /* Adjust font size for the numbers */
|
||||
}
|
||||
.toggle-details {
|
||||
color: #888; /* Make toggle symbol more subtle */
|
||||
transition: color 0.2s; /* Smooth transition on hover */
|
||||
}
|
||||
|
||||
|
||||
/* Style for selected rows */
|
||||
tr.row-selected > td {
|
||||
background-color: #e7f3ff; /* Example light blue highlight */
|
||||
}
|
||||
|
||||
/* Ensure checkbox is vertically aligned if needed */
|
||||
.row-selector {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
/* Hide rows not matching the filter */
|
||||
tr.hidden-by-mode {
|
||||
display: none !important; /* Use important to override other display styles if necessary */
|
||||
}
|
||||
tr.hidden-by-search {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* --- Mode Toggle Button Styles --- */
|
||||
#view-mode-toggle {
|
||||
height: 38px; /* Match input height */
|
||||
box-sizing: border-box;
|
||||
flex-shrink: 0; /* Prevent toggle from shrinking on small screens */
|
||||
}
|
||||
.mode-button {
|
||||
transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
|
||||
white-space: nowrap; /* Prevent text wrapping */
|
||||
}
|
||||
.mode-button:not(.active) {
|
||||
background-color: #f8f9fa; /* Light grey background */
|
||||
color: #495057; /* Dark grey text */
|
||||
}
|
||||
.mode-button:not(.active):hover {
|
||||
background-color: #e2e6ea; /* Slightly darker grey on hover */
|
||||
}
|
||||
|
||||
/* Style for highlighted rows in view mode */
|
||||
tr.view-highlighted > td {
|
||||
background-color: #fffef5; /* Very light yellow/cream */
|
||||
/* Border moved to specific cell below */
|
||||
}
|
||||
/* Apply border and adjust padding ONLY for the first *visible* cell (Model name) in view mode */
|
||||
tr.view-highlighted > td:nth-child(2) {
|
||||
border-left: 4px solid #ffc107; /* Warning yellow border */
|
||||
/* Original padding is 8px. Subtract border width. */
|
||||
padding-left: 4px;
|
||||
}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
const LEADERBOARD_CUSTOM_TITLE = "Qwen3 results on the aider polyglot benchmark";
|
||||
{% include leaderboard_table.js %}
|
||||
</script>
|
||||
|
||||
|
||||
## No think, via official Alibaba API
|
||||
|
||||
These results were obtained running against `https://dashscope.aliyuncs.com/compatible-mode/v1`
|
||||
with no thinking.
|
||||
|
||||
```bash
|
||||
export OPENAI_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
|
||||
export OPENAI_API_KEY=<key>
|
||||
```
|
||||
|
||||
```yaml
|
||||
- name: openai/qwen3-235b-a22b
|
||||
use_temperature: 0.7
|
||||
streaming: false
|
||||
extra_params:
|
||||
stream: false
|
||||
max_tokens: 16384
|
||||
top_p: 0.8
|
||||
top_k: 20
|
||||
temperature: 0.7
|
||||
enable_thinking: false
|
||||
extra_body:
|
||||
enable_thinking: false
|
||||
```
|
||||
|
||||
## OpenRouter only TogetherAI, recommended /no_think settings
|
||||
|
||||
These results were obtained with the
|
||||
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
|
||||
non-thinking model settings in `.aider.model.settings.yml`:
|
||||
|
||||
```yaml
|
||||
- name: openrouter/qwen/qwen3-235b-a22b
|
||||
system_prompt_prefix: "/no_think"
|
||||
use_temperature: 0.7
|
||||
extra_params:
|
||||
max_tokens: 24000
|
||||
top_p: 0.8
|
||||
top_k: 20
|
||||
min_p: 0.0
|
||||
temperature: 0.7
|
||||
extra_body:
|
||||
provider:
|
||||
order: ["Together"]
|
||||
```
|
||||
|
||||
And then running aider:
|
||||
|
||||
```bash
|
||||
aider --model openrouter/qwen/qwen3-235b-a22b
|
||||
```
|
||||
|
||||
|
||||
## OpenRouter, all providers, default settings (thinking)
|
||||
|
||||
These results were obtained by simply running aider as shown below, without any model specific settings.
|
||||
This should have enabled thinking, assuming upstream API providers honor that convention for Qwen3.
|
||||
|
||||
```bash
|
||||
aider --model openrouter/qwen/qwen3-xxx
|
||||
```
|
||||
|
||||
## VLLM, bfloat16, recommended /no_think
|
||||
|
||||
These [benchmarks results were obtained by GitHub user AlongWY](https://github.com/Aider-AI/aider/pull/3908)
|
||||
with the
|
||||
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
|
||||
non-thinking model settings in `.aider.model.settings.yml`:
|
||||
|
||||
```yaml
|
||||
- name: openai/<model-name>
|
||||
system_prompt_prefix: "/no_think"
|
||||
use_temperature: 0.7
|
||||
extra_params:
|
||||
max_tokens: 24000
|
||||
top_p: 0.8
|
||||
top_k: 20
|
||||
min_p: 0.0
|
||||
temperature: 0.7
|
||||
```
|
||||
|
||||
And then running aider:
|
||||
|
||||
```bash
|
||||
aider --model openai/<model-name> --openai-api-base <url>
|
||||
```
|
||||
BIN
aider/website/assets/2025-05-08-qwen3.jpg
Normal file
BIN
aider/website/assets/2025-05-08-qwen3.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 221 KiB |
BIN
aider/website/assets/Glass_TTY_VT220.ttf
Normal file
BIN
aider/website/assets/Glass_TTY_VT220.ttf
Normal file
Binary file not shown.
2366
aider/website/assets/asciinema/asciinema-player.css
Normal file
2366
aider/website/assets/asciinema/asciinema-player.css
Normal file
File diff suppressed because it is too large
Load Diff
1
aider/website/assets/asciinema/asciinema-player.min.js
vendored
Normal file
1
aider/website/assets/asciinema/asciinema-player.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
BIN
aider/website/assets/audio/auto-accept-architect/00-01.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/00-01.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/00-11.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/00-11.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/00-40.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/00-40.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/00-48.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/00-48.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/01-00.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/01-00.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/01-28.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/01-28.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/01-42.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/01-42.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/02-00.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/02-00.mp3
Normal file
Binary file not shown.
BIN
aider/website/assets/audio/auto-accept-architect/02-05.mp3
Normal file
BIN
aider/website/assets/audio/auto-accept-architect/02-05.mp3
Normal file
Binary file not shown.
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"00-01": "We're going to add a new feature to automatically accept edits proposed by the architect model.",
|
||||
"00-11": "First, let's add the new switch.",
|
||||
"00-40": "Aider figured out that it should be passed to the Coder class.",
|
||||
"00-48": "Now we need to implement the functionality.",
|
||||
"01-00": "Let's do some manual testing.",
|
||||
"01-28": "That worked. Let's make sure we can turn it off too.",
|
||||
"02-00": "Let's quickly tidy up the changes to HISTORY.",
|
||||
"02-05": "All done!",
|
||||
"01-42": "That worked too. Let's have aider update the HISTORY file to document the new feature."
|
||||
}
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user